import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
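
# Usage sketch (an addition, not part of the test file above): how a custom
# ModelHook is wired up in practice. `ScaleOutputHook` is a hypothetical name
# chosen for illustration; the hook API is the one exercised by the tests.
if __name__ == "__main__":

    class ScaleOutputHook(ModelHook):
        def post_forward(self, module, output):
            # Halve whatever the wrapped module returns.
            return output * 0.5

    demo_model = ModelForTest()
    add_hook_to_module(demo_model, ScaleOutputHook())
    print(demo_model(torch.randn(2, 3)))  # forward output scaled by 0.5
    remove_hook_from_module(demo_model)  # restores the original forward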
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    """Project Euler problem 86: return the least cuboid size M such that the
    cumulative number of cuboids with an integer shortest path exceeds `limit`."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
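
# Sanity check (added; figures from the Project Euler 86 statement): M = 99
# admits 1,975 integer shortest-path cuboids and M = 100 admits 2,060, so the
# least M with more than 2,000 solutions is 100.
assert solution(2_000) == 100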
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Create a schedule with a constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Create a piecewise constant schedule from a rule string like "1:10,0.1:20,0.01"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Create a schedule that decreases linearly to 0 after a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Create a schedule that follows a cosine decay to 0 after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Create a cosine schedule with several hard restarts after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Create a schedule with a polynomial decay from the optimizer's initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point that dispatches to the scheduler named by `name`."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
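
# Usage sketch (added; this module is imported from inside a package, so the
# snippet is shown as a comment): warmup plus cosine decay for a toy parameter,
# using only the public helpers defined above.
#
#   param = torch.nn.Parameter(torch.zeros(1))
#   optimizer = torch.optim.SGD([param], lr=0.1)
#   lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
#   for _ in range(5):
#       optimizer.step()
#       lr_scheduler.step()   # lr is still climbing through warmup here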
# NOTE (added): the original names of the eight hard-coded timestep tables
# below were lost to identifier mangling; TIMESTEP_SCHEDULE_1 ... _8 are
# placeholders, one per list.
TIMESTEP_SCHEDULE_1 = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
TIMESTEP_SCHEDULE_2 = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
TIMESTEP_SCHEDULE_3 = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
TIMESTEP_SCHEDULE_4 = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
TIMESTEP_SCHEDULE_5 = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
TIMESTEP_SCHEDULE_6 = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
TIMESTEP_SCHEDULE_7 = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
TIMESTEP_SCHEDULE_8 = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """Factory function used to create a ConvertCommand from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
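
# Illustration (added): how the TO_CONVERT regex table rewrites one line of a
# tfds script. Pure demonstration of the patterns defined above.
if __name__ == "__main__":
    sample = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
    for pattern, replacement in TO_CONVERT:
        sample = re.sub(pattern, replacement, sample)
    print(sample)  # features=datasets.Features({'text': datasets.Value('string')})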
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
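
# Usage sketch (added): registering and invoking the custom pipeline, following
# the transformers "add a new pipeline" guide; the checkpoint name is the
# illustrative one used in that guide.
if __name__ == "__main__":
    from transformers import AutoModelForSequenceClassification, pipeline
    from transformers.pipelines import PIPELINE_REGISTRY

    PIPELINE_REGISTRY.register_pipeline(
        "pair-classification",
        pipeline_class=PairClassificationPipeline,
        pt_model=AutoModelForSequenceClassification,
    )
    classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")
    print(classifier("I like you.", second_text="I love you."))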
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded_slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    # Overwrite the default test_latents_input because pix2pix encodes the image differently
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
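
# Usage sketch (added; the test module uses relative imports, so this is shown
# as a comment): the end-to-end call the slow tests above exercise.
#
#   pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#       "timbrooks/instruct-pix2pix", safety_checker=None
#   ).to("cuda")
#   source = load_image(
#       "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
#   )
#   edited = pipe("turn him into a cyborg", image=source, num_inference_steps=10).images[0]
#   edited.save("cyborg.png")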
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """Return True if some subset of `arr` sums to exactly `required_sum`.

    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
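
# Worked example (added): a subset of [3, 34, 4, 12, 5, 2] sums to 9 (4 + 5),
# while no subset sums to 30 (the items other than 34 total only 26).
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)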
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: maximize profit within a weight budget."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
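
# Worked example (added): with profit = [10, 20], weight = [5, 4] and
# max_weight = 7, the greedy pass takes all of item 2 (weight 4, profit 20),
# then the remaining 3 kg as 3/5 of item 1 for 6 more profit:
#   calc_profit([10, 20], [5, 4], 7) == 26.0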
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on reversed Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
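
# Worked example (added): for "a+b*c" the function reverses the input to
# "c*b+a", computes its postfix "cb*a+" (printing the conversion table along
# the way), and reverses that to obtain the prefix form:
#   infix_2_prefix("a+b*c") == "+a*bc"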
def average_absolute_deviation(nums: list) -> float:
    """Return the average absolute deviation of a list of numbers."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
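
# Worked example (added): for [0, 0, 10, 10] the mean is 5 and every element
# deviates from it by exactly 5, so the result is 5.0.
assert average_absolute_deviation([0, 0, 10, 10]) == 5.0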
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
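
# Note (added): with the _LazyModule indirection above, importing the package
# is cheap; the torch or flax submodules are only loaded on first attribute
# access, e.g.
#   from transformers import GPTNeoConfig  # pulls in only configuration_gpt_neo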
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
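
# Usage sketch (added; this module uses relative imports, so shown as a
# comment): driving the parser programmatically instead of via
# `accelerate config` on the command line.
#
#   parser = config_command_parser()
#   args = parser.parse_args(["--config_file", "my_config.yaml"])
#   config_command(args)  # prompts interactively, then writes my_config.yaml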
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
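# Usage sketch (illustrative): read one or more local text files into a
# `datasets.Dataset`, one example per line; "data.txt" is a hypothetical path.
#
#     dataset = TextDatasetReader("data.txt", split="train").read()
#     dataset = TextDatasetReader({"train": ["a.txt", "b.txt"]}).read()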
| 590
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
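# Usage sketch (illustrative): build a config for a slimmer variant together
# with the matching ONNX export config.
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     onnx_config = MobileNetV1OnnxConfig(config)
#     onnx_config.atol_for_validation  # -> 1e-4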
| 36
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 703
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
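# Illustrative note (not part of the test file): the helpers under test are
# framework-agnostic wrappers, so one call site can serve NumPy, PyTorch,
# TensorFlow and JAX inputs alike, e.g.:
#
#     x = np.random.randn(3, 4)
#     transpose(x).shape                 # (4, 3), a NumPy array
#     transpose(torch.tensor(x)).shape   # torch.Size([4, 3]), a torch tensor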
| 336
| 0
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
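# Usage sketch (illustrative, not part of the original file): a 1 kHz low-pass
# at a 48 kHz sample rate, fed a short step signal. Assumes
# `IIRFilter.process(sample)` from audio_filters.iir_filter.
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)
    step_response = [lowpass.process(sample) for sample in (0.0, 1.0, 1.0, 1.0)]
    print(step_response)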
| 7
|
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 7
| 1
|
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
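# Response shape sketch (illustrative; the exact schema is defined by the API,
# not guaranteed here): zenquotes typically returns a JSON list of objects like
#     [{"q": "<quote text>", "a": "<author>", "h": "<html rendering>"}]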
| 415
|
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 415
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 337
|
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Applies the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 551
| 0
|
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 2
|
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
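if __name__ == "__main__":
    # Illustrative example (not in the original file): three items with
    # weights [1, 3, 4] and values [15, 50, 60], capacity 6. The best choice
    # is items 0 and 2 (total weight 5, total value 75).
    print(knapsack([1, 3, 4], [15, 50, 60], 3, 6, 0))  # 75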
| 2
| 1
|
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
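if __name__ == "__main__":
    # Illustrative example (not in the original file): in an open 2x2 grid
    # there are exactly two simple paths from the top-left to the bottom-right.
    print(depth_first_search([[0, 0], [0, 0]], 0, 0, set()))  # 2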
| 173
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
| 173
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
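# Usage sketch (illustrative; the script filename is hypothetical):
#     python convert_openai_original_tf_checkpoint_to_pytorch.py \
#         --openai_checkpoint_folder_path /path/to/openai/checkpoint \
#         --pytorch_dump_folder_path /path/to/output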
| 273
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'{solution() = }')
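if __name__ == "__main__":
    # Illustrative check (not in the original file): Project Euler 116 states
    # that a row of length 5 admits 7 red, 3 green and 2 blue tilings, 12 total.
    print(solution(5))  # 12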
| 273
| 1
|
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 98
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
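# Non-interactive sketch (illustrative, not part of the original script):
# minimise a 3-variable function given by minterms {0, 1, 2, 5}.
#
#     binary = decimal_to_binary(3, [0.0, 1.0, 2.0, 5.0])
#     prime_implicants = check(binary)
#     chart = prime_implicant_chart(prime_implicants, binary)
#     essentials = selection(chart, prime_implicants)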
| 101
|
"""simple docstring"""
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'{price_plus_tax(100, 0.25) = }')
print(F'{price_plus_tax(125.50, 0.05) = }')
| 101
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends


# This module repeats the same dummy-object pattern once per sentencepiece-backed
# class (the concrete class names mirror the tokenizers they stand in for, so
# `SentencePieceDummyObject` below is a representative, hypothetical name). Each
# dummy raises a helpful error via `requires_backends` whenever `sentencepiece`
# is not installed.
class SentencePieceDummyObject(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute the log-mel spectrogram of the audio, clamped and scaled to roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
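# A minimal usage sketch for the extractor above; the 30-second chunk at a
# 16 kHz sampling rate yields 3000 frames with the default hop length of 160
# (shapes are what the defaults imply, not verified against a checkpoint):
#
#   extractor = WhisperFeatureExtractor()
#   audio = np.zeros(16000, dtype=np.float32)  # one second of silence
#   features = extractor(audio, sampling_rate=16000, return_tensors="np")
#   features["input_features"].shape  # (1, 80, 3000) after padding to 30 s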
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager that tracks CUDA memory allocated between enter and exit."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
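# Example invocation (the script name and paths are illustrative):
#
#   python peak_memory_usage.py --model_name_or_path bert-base-cased \
#       --num_epochs 1 --n_train 320 --n_val 160 --output_dir ./results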
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens), one per UTF-8 byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
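# A byte-level round trip with the tokenizer above (a sketch; ids assume the
# default 3 special tokens, so byte b maps to id b + 3):
#
#   tok = ByT5Tokenizer()
#   tok.convert_tokens_to_ids(tok._tokenize("hi"))       # [107, 108]
#   tok.convert_tokens_to_string(tok._tokenize("hi"))    # "hi"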
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width for images fed to the processor,
        assuming do_resize is True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
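# Why the log2 test works: a candidate k = (n**2 - 1) / 4 satisfies
# 4 * k + 1 = n**2, so check_partition_perfect(k) asks whether (n + 1) / 2 is a
# power of two. For example n = 7 gives k = 12 and (7 + 1) / 2 = 4 = 2**2, so
# 12 counts as a perfect partition.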
def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
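# A small instantiation sketch for the config above (values are illustrative,
# not tied to a released checkpoint):
#
#   config = SwitchTransformersConfig(num_experts=8, num_sparse_encoder_layers=3)
#   config.encoder_sparse_step  # -> 4: a sparse MoE layer every 4 encoder layers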
def solution(n: int = 1000) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for a from 3 to n inclusive."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
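# Quick check of the summand: for n = 3 the only term is
# 2 * 3 * ((3 - 1) // 2) = 6, so solution(3) == 6.
assert solution(3) == 6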
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
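# With the lazy structure above, importing e.g. `XLNetModel` from this package
# only loads `modeling_xlnet` on first attribute access, keeping the top-level
# import cheap.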
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(_snake_case ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case )
else:
A__ = processor(_snake_case , _snake_case , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : float ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
A__ = temperature
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores / self.temperature
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] )
A__ = jnp.full_like(_snake_case , self.filter_value )
A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(_snake_case , 1 )
score_mask |= score_mask.at[:, 0].set(_snake_case )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case )
A__ = jnp.where(_snake_case , _snake_case , _snake_case )
A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1]
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
A__ = max(_snake_case , _snake_case )
A__ = filter_value
def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size , self.filter_value )
A__ = min(self.top_k , scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(_snake_case , _snake_case )
A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case )
A__ = next_scores_flat.reshape(_snake_case , _snake_case )
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int ):
"""simple docstring"""
A__ = bos_token_id
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int , _snake_case : int ):
"""simple docstring"""
A__ = max_length
A__ = eos_token_id
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
A__ = min_length
A__ = eos_token_id
def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = list(_snake_case )
A__ = begin_index
def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : list ):
"""simple docstring"""
A__ = list(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = dict(_snake_case )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(_snake_case )
A__ = jnp.intaa(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
def _force_token(_snake_case : Dict ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' )
A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , )
return scores
class FlaxWhisperTimeStampLogitsProcessor(UpperCAmelCase_):  # base: this file's FlaxLogitsProcessor
    """Enforces Whisper's timestamp-token sampling rules on the logits."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
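
# Editor's sketch (hypothetical numbers, not part of the original module): the
# cumulative-probability rule in handle_cumulative_probs keeps only timestamp
# tokens once their summed probability beats the single best text token.
def _timestamp_rule_demo():
    logprobs = jnp.log(jnp.array([0.2, 0.1, 0.35, 0.35]))  # last two are "timestamps"
    timestamp_begin = 2
    timestamp_logprob = jax.nn.logsumexp(logprobs[timestamp_begin:], axis=-1)  # log(0.7)
    max_text_token_logprob = jnp.max(logprobs[:timestamp_begin])  # log(0.2)
    return bool(timestamp_logprob > max_text_token_logprob)  # True -> text tokens get masked
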
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    r"""
    Constructs a GLPN image processor: optionally resizes images down to the nearest
    multiple of `size_divisor` and rescales pixel values to [0, 1].
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
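
# Editor's sketch (hypothetical sizes, not part of the original module): resize()
# rounds both spatial dimensions *down* to the nearest multiple of size_divisor,
# so the output shape is always divisible by it.
def _size_divisor_demo(height: int = 518, width: int = 654, size_divisor: int = 32):
    return (height // size_divisor * size_divisor, width // size_divisor * size_divisor)  # (512, 640)
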
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Returns number + 2 if both number and number + 2 are prime, else -1.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
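
# For example: twin_prime(3) == 5 and twin_prime(5) == 7, while twin_prime(8) == -1
# because 8 is not prime.
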
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_decoder()
UpperCamelCase__ = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCamelCase__ = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(UpperCAmelCase_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=UpperCAmelCase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_decoder()
UpperCamelCase__ = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
UpperCamelCase__ = floats_list((3, 1000) )
UpperCamelCase__ = feature_extractor(UpperCAmelCase_ , return_tensors='np' )
UpperCamelCase__ = processor(UpperCAmelCase_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_decoder()
UpperCamelCase__ = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
UpperCamelCase__ = 'This is a test string'
UpperCamelCase__ = processor(text=UpperCAmelCase_ )
UpperCamelCase__ = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_decoder()
UpperCamelCase__ = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
UpperCamelCase__ = self._get_dummy_logits(shape=(10, 16) , seed=13 )
UpperCamelCase__ = processor.decode(UpperCAmelCase_ )
UpperCamelCase__ = decoder.decode_beams(UpperCAmelCase_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_decoder()
UpperCamelCase__ = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
UpperCamelCase__ = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCamelCase__ = processor.batch_decode(UpperCAmelCase_ )
else:
with get_context(UpperCAmelCase_ ).Pool() as pool:
UpperCamelCase__ = processor.batch_decode(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase__ = list(UpperCAmelCase_ )
with get_context('fork' ).Pool() as p:
UpperCamelCase__ = decoder.decode_beams_batch(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(UpperCAmelCase_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(UpperCAmelCase_ , decoded_processor.logit_score )
self.assertListEqual(UpperCAmelCase_ , decoded_processor.lm_score )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_decoder()
UpperCamelCase__ = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
UpperCamelCase__ = self._get_dummy_logits()
UpperCamelCase__ = 15
UpperCamelCase__ = -20.0
UpperCamelCase__ = -4.0
UpperCamelCase__ = processor.batch_decode(
UpperCAmelCase_ , beam_width=UpperCAmelCase_ , beam_prune_logp=UpperCAmelCase_ , token_min_logp=UpperCAmelCase_ , )
UpperCamelCase__ = decoded_processor_out.text
UpperCamelCase__ = list(UpperCAmelCase_ )
with get_context('fork' ).Pool() as pool:
UpperCamelCase__ = decoder.decode_beams_batch(
UpperCAmelCase_ , UpperCAmelCase_ , beam_width=UpperCAmelCase_ , beam_prune_logp=UpperCAmelCase_ , token_min_logp=UpperCAmelCase_ , )
UpperCamelCase__ = [d[0][0] for d in decoded_decoder_out]
UpperCamelCase__ = [d[0][2] for d in decoded_decoder_out]
UpperCamelCase__ = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , UpperCAmelCase_ )
self.assertTrue(np.array_equal(UpperCAmelCase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , UpperCAmelCase_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(UpperCAmelCase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , UpperCAmelCase_ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_decoder()
UpperCamelCase__ = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
UpperCamelCase__ = self._get_dummy_logits()
UpperCamelCase__ = 2.0
UpperCamelCase__ = 5.0
UpperCamelCase__ = -20.0
UpperCamelCase__ = True
UpperCamelCase__ = processor.batch_decode(
UpperCAmelCase_ , alpha=UpperCAmelCase_ , beta=UpperCAmelCase_ , unk_score_offset=UpperCAmelCase_ , lm_score_boundary=UpperCAmelCase_ , )
UpperCamelCase__ = decoded_processor_out.text
UpperCamelCase__ = list(UpperCAmelCase_ )
decoder.reset_params(
alpha=UpperCAmelCase_ , beta=UpperCAmelCase_ , unk_score_offset=UpperCAmelCase_ , lm_score_boundary=UpperCAmelCase_ , )
with get_context('fork' ).Pool() as pool:
UpperCamelCase__ = decoder.decode_beams_batch(
UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , UpperCAmelCase_ )
UpperCamelCase__ = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCamelCase__ = processor.decoder.model_container[processor.decoder._model_key]
UpperCamelCase__ = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
UpperCamelCase__ = os.listdir(UpperCAmelCase_ )
UpperCamelCase__ = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = snapshot_download('hf-internal-testing/processor_with_lm' )
UpperCamelCase__ = WavaVecaProcessorWithLM.from_pretrained(UpperCAmelCase_ )
UpperCamelCase__ = processor.decoder.model_container[processor.decoder._model_key]
UpperCamelCase__ = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
UpperCamelCase__ = os.listdir(UpperCAmelCase_ )
UpperCamelCase__ = os.listdir(UpperCAmelCase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCamelCase__ = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCamelCase__ = floats_list((3, 1000) )
UpperCamelCase__ = processor_wavaveca(UpperCAmelCase_ , return_tensors='np' )
UpperCamelCase__ = processor_auto(UpperCAmelCase_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
UpperCamelCase__ = self._get_dummy_logits()
UpperCamelCase__ = processor_wavaveca.batch_decode(UpperCAmelCase_ )
UpperCamelCase__ = processor_auto.batch_decode(UpperCAmelCase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_decoder()
UpperCamelCase__ = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
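
    # Editor's example (hypothetical offsets): get_from_offsets(
    #     [{"word": "a", "start_offset": 0}, {"word": "b", "start_offset": 2}], "word"
    # ) returns ["a", "b"].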
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCamelCase__ = self._get_dummy_logits()[0]
UpperCamelCase__ = processor.decode(UpperCAmelCase_ , output_word_offsets=UpperCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCamelCase__ = self._get_dummy_logits()
UpperCamelCase__ = processor.batch_decode(UpperCAmelCase_ , output_word_offsets=UpperCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(UpperCAmelCase_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
import torch
UpperCamelCase__ = load_dataset('common_voice' , 'en' , split='train' , streaming=UpperCAmelCase_ )
UpperCamelCase__ = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
UpperCamelCase__ = iter(UpperCAmelCase_ )
UpperCamelCase__ = next(UpperCAmelCase_ )
UpperCamelCase__ = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
UpperCamelCase__ = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCamelCase__ = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
UpperCamelCase__ = model(UpperCAmelCase_ ).logits.cpu().numpy()
UpperCamelCase__ = processor.decode(logits[0] , output_word_offsets=UpperCAmelCase_ )
UpperCamelCase__ = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCamelCase__ = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
UpperCamelCase__ = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(UpperCAmelCase_ , 'word' ) ) , UpperCAmelCase_ )
self.assertEqual(' '.join(self.get_from_offsets(UpperCAmelCase_ , 'word' ) ) , output.text )
# output times
UpperCamelCase__ = torch.tensor(self.get_from_offsets(UpperCAmelCase_ , 'start_time' ) )
UpperCamelCase__ = torch.tensor(self.get_from_offsets(UpperCAmelCase_ , 'end_time' ) )
# fmt: off
UpperCamelCase__ = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
UpperCamelCase__ = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=0.01 ) )
self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=0.01 ) )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id,
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework)
        )
        return inputs
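
    # Editor's note (behavioural sketch, not part of the original file): with a
    # dynamic axis (-1), compute_effective_axis_dimension falls back to the fixed
    # default minus the special tokens the tokenizer will add, while a concrete
    # positive value is returned unchanged.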
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
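
# Editor's example: a vocab file containing the lines "[PAD]" and "[CLS]" yields
# OrderedDict([("[PAD]", 0), ("[CLS]", 1)]) -- one token per line, id = line index.
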
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-ProphetNet."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
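
    # Editor's sketch (hypothetical piece ids): with fairseq_offset = 12, ids 0-4
    # are the special tokens above, ids 5-14 the [unused0]..[unused9] slots, and an
    # spm piece id p >= 1 maps to vocab id p + 12, so the first real piece
    # (spm id 3, ",") lands on id 15.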
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
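
# Editor's sketch (made-up logits): for eval_pred = (np.array([[0.1, 0.9], [0.8, 0.2]]),
# np.array([1, 0])), np.argmax picks classes [1, 0] and the accuracy metric reports 1.0.
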
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        # Also evaluate on the training set at each epoch boundary, so train and
        # eval metrics can be compared side by side.
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
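
# Editor's sketch of a typical invocation (the script filename is hypothetical):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --output_dir ./results
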
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
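
    # Editor's note (assumption based on common diffusers test practice): the mps
    # branch above seeds a global CPU generator because device-local
    # torch.Generator objects are not supported on the mps backend.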
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __snake_case ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase :Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase :Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
lowercase :List[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
lowercase :Dict = StableDiffusionInpaintPipeline.from_pretrained(snake_case__ , safety_checker=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase :Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase :List[str] = torch.manual_seed(0 )
lowercase :int = pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , generator=snake_case__ , output_type='''np''' , )
lowercase :Any = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase :Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase :int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
lowercase :List[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
lowercase :Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(
snake_case__ , torch_dtype=torch.floataa , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase :int = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase :Dict = torch.manual_seed(0 )
lowercase :Any = pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , generator=snake_case__ , output_type='''np''' , )
lowercase :List[str] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase :Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase :Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase :int = '''stabilityai/stable-diffusion-2-inpainting'''
lowercase :Optional[int] = PNDMScheduler.from_pretrained(snake_case__ , subfolder='''scheduler''' )
lowercase :int = StableDiffusionInpaintPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , scheduler=snake_case__ , torch_dtype=torch.floataa , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase :str = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase :Any = torch.manual_seed(0 )
lowercase :List[Any] = pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type='''np''' , )
lowercase :int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Computes the Schur complement C - B^T A^{-1} B of the block [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
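
# Editor's note: for the symmetric block matrix M = [[A, B], [B^T, C]] with A
# invertible, the Schur complement S = C - B^T A^{-1} B satisfies
# det(M) = det(A) * det(S); the first unit test below checks exactly this identity.
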
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name)
            )
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
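
# Editor's example: _dense_to_one_hot(numpy.array([1, 0]), num_classes=3) returns
# [[0., 1., 0.], [1., 0., 0.]] -- a one is scattered at flat index
# row * num_classes + label for each row.
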
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name)
            )
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet. `dtype` can be `uint8` (raw pixels) or `float32` (rescaled to [0, 1])."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
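
    # Editor's usage sketch (hypothetical data): given a _DataSet `ds` with 100
    # examples, repeated calls to ds.next_batch(32) walk through a shuffled epoch;
    # the call that crosses the epoch boundary concatenates the tail of the old
    # epoch with the head of the next and increments ds.epochs_completed.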
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=__A ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : Any , *UpperCAmelCase : Dict , **UpperCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : Dict , *UpperCAmelCase : str , **UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=__A ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Union[str, Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : Tuple , *UpperCAmelCase : Any , **UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=__A ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Any , *UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : str , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Union[str, Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : Tuple , *UpperCAmelCase : Any , **UpperCAmelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : int , *UpperCAmelCase : int , **UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : str , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : Dict , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : Optional[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : Optional[int] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : int , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : Any , *UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : Union[str, Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : Optional[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Any ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Optional[int] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : Tuple , *UpperCAmelCase : List[str] , **UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : int ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Optional[int] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ) -> int:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Dict ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : str , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : str ) -> int:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Optional[int] , *UpperCAmelCase : Any , **UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : str , *UpperCAmelCase : str , **UpperCAmelCase : Any ) -> int:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : Optional[int] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Any ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : Optional[int] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
"""simple docstring"""
UpperCamelCase_ = ['''flax''']
def __init__( self : Optional[Any] , *UpperCAmelCase : str , **UpperCAmelCase : int ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A__ ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A__ ( cls : Union[str, Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
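# Behaviour sketch for the dummy classes above (illustrative, not part of the file):
# constructing any of them without `flax` installed surfaces a helpful error instead
# of an opaque ModuleNotFoundError deep inside the library.
#
#   try:
#       UpperCAmelCase_()  # stands in for any flax-backed class
#   except ImportError as err:
#       print(err)  # explains that the `flax` backend is required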
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
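# How the lazy module behaves (a sketch of the intent, not extra runtime code):
# `import transformers.models.trocr` is now cheap; `_LazyModule` resolves
# `configuration_trocr`, `processing_trocr`, and (when torch is present)
# `modeling_trocr` only when one of their attributes is first accessed.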
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle ``data`` in place by swapping two random positions per pass."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
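def fisher_yates_shuffle_textbook(data: list) -> list[Any]:
    """Classic Fisher-Yates variant, added for comparison: swapping each position
    with a random index at or below it makes every permutation equally likely,
    which the random-pair swap above does not guarantee."""
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data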
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
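# Minimal usage sketch (values are illustrative defaults, not tuned settings):
#
#   config = InformerConfig(prediction_length=24, context_length=48)
#   assert config.d_model == 64 and config.attention_type == "prob"
#   # `hidden_size` is aliased onto `d_model` through `attribute_map`:
#   assert config.hidden_size == config.d_model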
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily widen the feature extractor so it pads mel spectrograms
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
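# Usage sketch (hedged: `waveform` is a hypothetical 1-D float array at 16 kHz):
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   batch = processor.pad(input_values=[inputs["input_values"][0]], return_tensors="pt")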
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "do_center_crop"))
        self.assertTrue(hasattr(image_processor, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
fast27_timesteps = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
smart27_timesteps = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
smart50_timesteps = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
smart100_timesteps = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
smart185_timesteps = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
super27_timesteps = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
super40_timesteps = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
super100_timesteps = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """
    Output class for the DeepFloyd IF pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`): the denoised images.
        nsfw_detected (`List[bool]`): per-image flags from the safety checker.
        watermark_detected (`List[bool]`): per-image flags from the watermark detector.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
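# Usage sketch (hedged: requires the gated DeepFloyd weights and, in practice, a GPU):
#
#   pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
#   # the precomputed schedules imported above can be passed straight to the pipeline:
#   image = pipe("a photo of a corgi", timesteps=fast27_timesteps).images[0]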
import unittest
from knapsack import knapsack as k
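# Interface note (an assumption inferred from the call sites below): the module under
# test exposes `knapsack(capacity, weights, values, counter)` and returns the best
# achievable total value, e.g. knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220
# (take the 100- and 120-value items, total weight exactly 50).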
class Test(unittest.TestCase):
    def test_base_case(self):
        """Zero capacity or zero-value items yield 0."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Tuple = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = True
snake_case__ : List[str] = self.has_attentions
# no need to test all models as different heads yield the same functionality
snake_case__ : Optional[int] = self.all_model_classes[0]
snake_case__ : Optional[int] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : int = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = outputs[0][-1]
# Encoder-/Decoder-only models
snake_case__ : int = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
snake_case__ : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
snake_case__ : Any = copy.deepcopy(__SCREAMING_SNAKE_CASE )
snake_case__ : str = None
snake_case__ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
snake_case__ : Union[str, Any] = copy.deepcopy(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = False
snake_case__ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(**__SCREAMING_SNAKE_CASE )
def solution(length: int = 50) -> int:
    # Count the ways to fill a row of `length` units with blocks of length >= 3,
    # any two blocks separated by at least one empty unit (Project Euler 114 style).
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
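# Complexity note: the triple loop above is O(length**3); for the default length
# of 50 that is roughly 1e5 inner iterations, so the brute-force table fill is
# instant and no memoised recurrence is needed.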
if __name__ == "__main__":
    print(f"{solution() = }")
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = 'openai/whisper-base'
    description = (
        'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '
        'transcribed text.'
    )
    name = 'transcriber'
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ['audio']
    outputs = ['text']

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors='pt').input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
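
# A minimal usage sketch for the tool above (the waveform is hypothetical;
# WhisperProcessor expects a 1-D float array sampled at 16 kHz):
#
#   tool = SpeechToTextTool()
#   text = tool(audio)  # PipelineTool.__call__ chains encode -> forward -> decode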
| 470
|
import numpy as np


class Cell:
    """A single grid cell, holding the A* bookkeeping values."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0  # cost from the start cell
        self.h = 0  # heuristic estimate to the goal
        self.f = 0  # g + h

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the in-bounds 8-connected neighbours of a cell."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from start to goal on the given Gridworld; returns the path."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # skip any neighbour that has already been expanded
            if n in _closed:
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # only queue the neighbour if no better copy of it is already queued
            if not any(c == n and c.f <= n.f for c in _open):
                _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
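
# Note on the heuristic above: (y2 - y1)**2 + (x2 - x1)**2 is the *squared*
# Euclidean distance, which can overestimate the true remaining cost on an
# 8-connected grid with unit step cost, so the returned path is not guaranteed
# optimal. An admissible alternative is the Chebyshev distance:
#
#   def chebyshev(a, b):
#       return max(abs(a[0] - b[0]), abs(a[1] - b[1]))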
| 470
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP image processor.
    """

    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize so the shortest edge matches size['shortest_edge'], keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if 'shortest_edge' not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center-crop an image to (size['height'], size['width'])."""
        size = get_size_dict(size)
        if 'height' not in size or 'width' not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale pixel values by a scale factor, e.g. 1/255 to map [0, 255] to [0, 1]."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given per-channel mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Preprocess one image or a batch of images into model-ready pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
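
# A minimal usage sketch for the processor above (illustrative values; in
# practice the configuration is usually loaded with from_pretrained):
#
#   import numpy as np
#   processor = CLIPImageProcessor()
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor(images=image, return_tensors='np')
#   batch['pixel_values'].shape  # (1, 3, 224, 224)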
| 108
|
"""simple docstring"""
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split a string into sentences and rejoin them with newlines (needed for rougeLsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
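
# Example (assuming nltk's 'punkt' data has been downloaded):
#
#   add_newline_to_end_of_each_sentence("First sentence. Second one.")
#   -> 'First sentence.\nSecond one.'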
| 426
| 0
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
a_ : str = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls) -> None:
        try:
            delete_repo(token=cls._token, repo_id='test-model-flax')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-model-flax-org')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub('test-model-flax', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=f'{key} not identical')

        # Reset repo
        delete_repo(token=self._token, repo_id='test-model-flax')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id='test-model-flax', push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=f'{key} not identical')

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub('valid_org/test-model-flax-org', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=f'{key} not identical')

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-model-flax-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id='valid_org/test-model-flax-org', push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=f'{key} not identical')


def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)

        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)

        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size='10KB')

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-subfolder'
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
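
# A minimal sketch of the parameter-comparison idea used in check_models_equal,
# written with jax.tree_util instead of flatten_dict (illustrative only):
#
#   import jax
#   max_diffs = jax.tree_util.tree_map(
#       lambda a, b: float(abs(a - b).max()), model_1.params, model_2.params)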
| 702
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('Protein models do not support embedding resizing.')
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip('Protein models do not support embedding resizing.')
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
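
# The integration checks above pin a 3x3 logits slice against hard-coded
# reference values; a small reusable helper for that pattern (illustrative):
#
#   def assert_slice_close(output, expected, atol):
#       import numpy as np
#       assert np.allclose(np.asarray(output)[:, :3, :3], expected, atol=atol)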
| 444
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify the conversion on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HuggingFace DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
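
# A minimal numpy demo of the fused-qkv split performed in read_in_q_k_v above
# (hidden_size=4 is illustrative):
#
#   import numpy as np
#   hidden_size = 4
#   qkv = np.zeros((3 * hidden_size, hidden_size))
#   q = qkv[:hidden_size, :]
#   k = qkv[hidden_size : 2 * hidden_size, :]
#   v = qkv[-hidden_size:, :]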
| 579
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
        '''google/bigbird-roberta-large''': (
            '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
        ),
        '''google/bigbird-base-trivia-itc''': (
            '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/bigbird-roberta-base''': 4096,
    '''google/bigbird-roberta-large''': 4096,
    '''google/bigbird-base-trivia-itc''': 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs=None, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Take a string and return a list of sub-word token strings."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, spaces_between_special_tokens=True, **kwargs, ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
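
# Example of the special-token layout the methods above produce for a pair of
# sequences A and B (ids are schematic):
#
#   build_inputs_with_special_tokens(A, B)     -> [CLS] A [SEP] B [SEP]
#   get_special_tokens_mask(A, B)              -> [1] 0*len(A) [1] 0*len(B) [1]
#   create_token_type_ids_from_sequences(A, B) -> 0 for "[CLS] A [SEP]", 1 for "B [SEP]"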
| 572
| 0
|
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid-integers p^q * q^p <= base^degree (Project Euler 800)."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
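
# Why the two-pointer scan works: p^q * q^p <= base^degree exactly when
# q*log2(p) + p*log2(q) <= degree*log2(base). For a fixed left prime the
# left-hand side grows with the right prime, so `right` only ever moves down
# and each prime pair is examined at most once (linear time after sieving).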
| 595
|
"""simple docstring"""
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the subtree size rooted at start, recording vertices whose
    subtree has an even number of nodes (their parent edge can be cut)."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
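
# Why len(cuts) - 1: dfs(1) counts the whole tree (even size), so the root
# itself lands in `cuts`; every other recorded vertex marks one removable
# edge to its parent. For the sample edges above this prints 2.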
| 595
| 1
|
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power (P = S * pf) from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (Q = S * sqrt(1 - pf^2)) from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
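
# Quick example (apparent power in volt-amperes):
#
#   real_power(100, 0.9)      -> 90.0                 (P = S * pf)
#   reactive_power(100, 0.9)  -> ~43.589              (Q = S * sqrt(1 - pf**2))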
| 116
|
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs, ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
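
# A minimal usage sketch (hypothetical local file):
#
#   ds = TextDatasetReader('data.txt', split='train').read()
#   ds[0]['text']  # first line of the file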
| 480
| 0
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            })
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        else:
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            })
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname, )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer())
        return retriever
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )],dtype=np.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = retriever.retrieve(_A,n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ),2 )
self.assertEqual(sorted(doc_dicts[0] ),["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ),_A )
self.assertEqual(doc_dicts[0]["id"][0],"1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0],"0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(),[[1], [0]] )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_dataset()
retriever.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Dict = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A,_A )
SCREAMING_SNAKE_CASE_ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )],dtype=np.floataa )
SCREAMING_SNAKE_CASE_ : int = retriever.retrieve(_A,n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 1
SCREAMING_SNAKE_CASE_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
SCREAMING_SNAKE_CASE_ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )],dtype=np.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = retriever.retrieve(_A,n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ),2 )
self.assertEqual(sorted(doc_dicts[0] ),["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ),_A )
self.assertEqual(doc_dicts[0]["id"][0],"1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0],"0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(),[[1], [0]] )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : List[str] = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A,_A )
SCREAMING_SNAKE_CASE_ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )],dtype=np.floataa )
SCREAMING_SNAKE_CASE_ : Optional[int] = retriever.retrieve(_A,n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1
SCREAMING_SNAKE_CASE_ : int = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
SCREAMING_SNAKE_CASE_ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )],dtype=np.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = retriever.retrieve(_A,n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ),2 )
self.assertEqual(sorted(doc_dicts[0] ),["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ),_A )
self.assertEqual(doc_dicts[0]["id"][0],"1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0],"0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(),[[1], [0]] )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A,_A )
SCREAMING_SNAKE_CASE_ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )],dtype=np.floataa )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = retriever.retrieve(_A,n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_legacy_index_retriever()
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )],dtype=np.float32 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = retriever.retrieve(_A,n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ),2 )
self.assertEqual(sorted(doc_dicts[0] ),["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ),_A )
self.assertEqual(doc_dicts[0]["text"][0],"bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0],"foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(),[[1], [0]] )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A,_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )],dtype=np.float32 )
SCREAMING_SNAKE_CASE_ : int = retriever.retrieve(_A,n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ : Optional[Any] = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )],dtype=np.float32 )
SCREAMING_SNAKE_CASE_ : int = retriever(_A,_A,prefix=retriever.config.generator.prefix,n_docs=_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape,(2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_A,_A )
self.assertIsInstance(_A,_A )
self.assertIsInstance(_A,np.ndarray )
SCREAMING_SNAKE_CASE_ : Optional[Any] = retriever(
_A,_A,prefix=retriever.config.generator.prefix,n_docs=_A,return_tensors="pt",)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape,(2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_A,torch.Tensor )
self.assertIsInstance(_A,torch.Tensor )
self.assertIsInstance(_A,torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.get_dpr_ctx_encoder_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
retriever.set_ctx_encoder_tokenizer(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )],dtype=np.float32 )
SCREAMING_SNAKE_CASE_ : str = retriever(_A,_A,prefix=retriever.config.generator.prefix,n_docs=_A )
self.assertEqual(
len(_A ),6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ),_A ) # check that the doc-token keys are present in the output dictionary
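# The "[[1], [0]]" doc-id assertions above follow from maximum-inner-product
# search over the dummy embeddings. A minimal numpy sketch of that ranking
# step (illustrative only -- the real retriever delegates to a FAISS or
# legacy index, and `rank_docs` is not part of the API):
import numpy as np

def rank_docs(question_embeds, doc_embeds, n_docs=1):
    # scores[i, j] = <question i, document j>; take the top n_docs per question
    scores = question_embeds @ doc_embeds.T
    return np.argsort(-scores, axis=1)[:, :n_docs]

queries = np.stack([np.ones(4), -np.ones(4)])  # same embedding trick as the tests
docs = np.stack([-np.ones(4), np.ones(4)])     # doc 0 = all -1s, doc 1 = all +1s
assert rank_docs(queries, docs).tolist() == [[1], [0]]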
| 316
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline(ChunkPipeline ):
    def __init__( self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        requires_backends(self , "vision" )
        requires_backends(self , "torch" )
        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        """simple docstring"""
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        """simple docstring"""
        image = load_image(image )
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors="pt" )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. "
                "To return all points at once, set points_per_batch to None" )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """simple docstring"""
        input_boxes = model_inputs.pop("input_boxes" )
        is_last = model_inputs.pop("is_last" )
        original_sizes = model_inputs.pop("original_sizes" ).tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes" ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs["iou_scores"]
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        """simple docstring"""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores" ) )
            all_masks.extend(model_output.pop("masks" ) )
            all_boxes.append(model_output.pop("boxes" ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 316
| 1
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
"""simple docstring"""
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 11_00,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**kwargs )
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
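# The full-loop tests above all share one sampling skeleton. A hedged, minimal
# version with a zero "model" standing in for a real UNet (requires the
# torchsde extra, per the @require_torchsde marker above; the scheduler calls
# -- set_timesteps / scale_model_input / step -- are the ones under test):
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # placeholder for a UNet call
    sample = scheduler.step(noise_pred, t, sample).prev_sample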
| 2
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
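# The helper above encodes DETR-style "shortest edge" resizing: scale the
# image so its short side hits size["shortest_edge"] while keeping the aspect
# ratio. The same arithmetic, isolated (no longest_edge clamping -- an
# assumption made for brevity):
def shortest_edge_resize(w, h, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(30, 60) == (36, 18)
assert shortest_edge_resize(60, 30) == (18, 36)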
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase):
"""simple docstring"""
a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self : Optional[int] ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
def snake_case_ ( self : List[str] ) -> int:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
def snake_case_ ( self : Any ) -> Union[str, Any]:
pass
def snake_case_ ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Tuple ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Optional[Any] ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
# prepare image and target
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_A = DeformableDetrImageProcessor()
_A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
# verify class_labels
_A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
@slow
def snake_case_ ( self : List[str] ) -> List[str]:
# prepare image, target and masks_path
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_A = DeformableDetrImageProcessor(format='''coco_panoptic''' )
_A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
# verify class_labels
_A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
# verify masks
_A = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
| 2
| 1
|
"""simple docstring"""
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
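# For reference, the non-deprecated import path the warning points to:
# from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput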
| 700
|
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f'{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}'
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
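# Worked example: jaro_winkler("hello", "world") matches only "l"
# (match_count = 1, transpositions = 0, no common prefix), giving
# jaro = (1/5 + 1/5 + 1/1) / 3 ~= 0.4667, which is also the final score.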
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 215
| 0
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
__a: Optional[int] = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
__a: Tuple = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 2_5_5).round().astype("""uint8""" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
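# Quick usage sketch (illustrative): a batch of model outputs in [-1, 1]
# becomes a list of PIL images via the two helpers above.
import torch

pil_images = pt_to_pil(torch.rand(2, 3, 8, 8) * 2 - 1)
assert len(pil_images) == 2 and pil_images[0].size == (8, 8)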
| 108
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a: Tuple = logging.get_logger(__name__)
__a: Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__a: Any = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__a: Any = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__a: str = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__a: Dict = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
__a: List[str] = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
__a: Dict = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
__a: Optional[int] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
__a: Tuple = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
__a: Optional[int] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a: List[Any] = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
__a: Optional[int] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
__a: Optional[Any] = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __call__( self : int , lowerCamelCase : int , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : Union[bool, str] = False , lowerCamelCase : Union[bool, str] = False , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , lowerCamelCase : Optional[bool] = None , **lowerCamelCase : Optional[int] , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , return_tensors=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , )
elif titles is None or texts is None:
_UpperCAmelCase = titles if texts is None else texts
return super().__call__(
lowerCamelCase , lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , return_tensors=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , )
_UpperCAmelCase = titles if not isinstance(lowerCamelCase , lowerCamelCase ) else [titles]
_UpperCAmelCase = texts if not isinstance(lowerCamelCase , lowerCamelCase ) else [texts]
_UpperCAmelCase = len(lowerCamelCase )
_UpperCAmelCase = questions if not isinstance(lowerCamelCase , lowerCamelCase ) else [questions] * n_passages
if len(lowerCamelCase ) != len(lowerCamelCase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(lowerCamelCase )} titles and {len(lowerCamelCase )} texts.""" )
_UpperCAmelCase = super().__call__(lowerCamelCase , lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase )["""input_ids"""]
_UpperCAmelCase = super().__call__(lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase )["""input_ids"""]
_UpperCAmelCase = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase , lowerCamelCase )
]
}
if return_attention_mask is not False:
_UpperCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_UpperCAmelCase = attention_mask
return self.pad(lowerCamelCase , padding=lowerCamelCase , max_length=lowerCamelCase , return_tensors=lowerCamelCase )
def lowerCamelCase ( self : Tuple , lowerCamelCase : BatchEncoding , lowerCamelCase : DPRReaderOutput , lowerCamelCase : int = 16 , lowerCamelCase : int = 64 , lowerCamelCase : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_UpperCAmelCase = reader_input["""input_ids"""]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reader_output[:3]
_UpperCAmelCase = len(lowerCamelCase )
_UpperCAmelCase = sorted(range(lowerCamelCase ) , reverse=lowerCamelCase , key=relevance_logits.__getitem__ )
_UpperCAmelCase = []
for doc_id in sorted_docs:
_UpperCAmelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_UpperCAmelCase = sequence_ids.index(self.pad_token_id )
else:
_UpperCAmelCase = len(lowerCamelCase )
_UpperCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCamelCase , top_spans=lowerCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCamelCase , start_index=lowerCamelCase , end_index=lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase ( self : List[Any] , lowerCamelCase : List[int] , lowerCamelCase : List[int] , lowerCamelCase : int , lowerCamelCase : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_UpperCAmelCase = []
for start_index, start_score in enumerate(lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _UpperCAmelCase = sorted(lowerCamelCase , key=lambda x : x[1] , reverse=lowerCamelCase )
_UpperCAmelCase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
_UpperCAmelCase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
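# The reader's __call__ above derives attention masks straight from padding:
# a position is attended to iff its token id is not the pad id. The rule,
# isolated (pad id 0 is an assumption made for illustration):
def build_attention_mask(input_ids, pad_token_id=0):
    return [[int(tok != pad_token_id) for tok in ids] for ids in input_ids]

assert build_attention_mask([[101, 2054, 102, 0, 0]]) == [[1, 1, 1, 0, 0]]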
| 108
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : Optional[int] = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class NllbMoeConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
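# Minimal usage sketch: the config stores the routing hyper-parameters as
# plain attributes and round-trips through a dict like any PretrainedConfig.
config = NllbMoeConfig(num_experts=8, router_dtype="bfloat16")
assert config.to_dict()["num_experts"] == 8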
| 698
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open( *args , **kwargs ):
            pass
    def load_image(image ):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__A : Tuple = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model=__lowerCamelCase , tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
SCREAMING_SNAKE_CASE = "What is the placebo?"
SCREAMING_SNAKE_CASE = [
{
"image": load_image(__lowerCamelCase ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _snake_case ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = dqa_pipeline(__lowerCamelCase , top_k=2 )
self.assertEqual(
__lowerCamelCase , [
[
{"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )},
{"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )},
]
]
* 3 , )
@require_torch
@require_detectron2
@require_pytesseract
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "How many cats are there?"
SCREAMING_SNAKE_CASE = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
        # No text is detected in this image, so layoutlmv2 should fail
        # and return an empty answer
SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
        # We can optionally pass the words and bounding boxes directly
SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , words=__lowerCamelCase , boxes=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=True )
        dqa_pipeline = pipeline(
            "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=tokenizer , revision="3dc6de3" , max_seq_len=50 , )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ] , )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2 , )

        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , "" ) ) )
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ] , )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
    def test_tf(self):
        pass
| 698
| 1
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_6 , num_hidden_layers=3 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1_0_0_0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
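        # A quick check of the arithmetic above, using this tester's defaults
        # (illustrative numbers only): image_size=4 and patch_size=2 give
        # image_seq_length = (4 // 2) ** 2 + 1 = 5, so with text_seq_length=7 the
        # full multimodal sequence length is 7 + 5 = 12.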
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaModel(config=config )
        model.to(torch_device )
        model.eval()
        # text + image
        result = model(input_ids , pixel_values=pixel_values )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model(pixel_values=pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification(self, config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self, config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self, config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
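        # Illustrative shape sketch (sizes are hypothetical, not from the source):
        # with T text tokens and V visual patches, the model emits logits over
        # T + V positions while the pipeline's p_mask only covers the T text
        # positions, so post-processing indexes out of range.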
return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=3_7 )
    def _prepare_for_class(self, inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(v , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='pt' ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 1_9_9, 7_6_8) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 66
|
'''simple docstring'''
def _a ( num_a : int , num_b : int ):
    """Return True when the two integers have opposite signs (XOR sign-bit trick)."""
    return num_a ^ num_b < 0
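# A worked example of the sign-bit trick above (illustrative values): (-3) ^ 2 has
# the two's-complement sign bit set, so the result compares < 0 and the function
# returns True, while 3 ^ 2 == 1 is not < 0, so it returns False.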
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 717
|
"""simple docstring"""
def solution( limit : int = 2_81_23 ):
    """Project Euler 23: return the sum of all positive integers that cannot be
    written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
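# Background for the 28123 bound used above (Project Euler 23): 12 is the smallest
# abundant number, 24 is the smallest sum of two abundant numbers, and every
# integer greater than 28123 is known to be expressible as such a sum, so the
# search can safely stop at `limit`.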
if __name__ == "__main__":
print(solution())
| 285
| 0
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens ):
    """Compute the MinHash of a code snippet given its tokens."""
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens(code ):
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    """MinHash-LSH index that groups near-duplicate code files into clusters."""

    def __init__( self , *,
        duplication_jaccard_threshold : float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )

    def add(self, code_key : Tuple , min_hash : MinHash ):
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )

    def get_duplicate_clusters(self):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters

    def save(self, filepath ):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash(element ):
    """Compute the MinHash of a single (index, row) pair from the dataset."""
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator ):
    """Yield (key, MinHash) pairs for a dataset iterator using a process pool."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator , jaccard_threshold ):
    """Build duplicate clusters for a dataset with MinHash LSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code_a , code_b ):
    """Compute the Jaccard similarity of two code snippets' token sets."""
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
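# A small worked example (hypothetical snippets): for "a b c" and "b c d" the token
# sets are {a, b, c} and {b, c, d}, giving a similarity of 2 / 4 == 0.5.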
_shared_dataset = None
def _find_cluster_extremes_shared(cluster , jaccard_threshold ):
    """Reduce a duplicate cluster to its "extreme" elements, counting copies."""
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a["base_index"]]["content"]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["base_index"]]["content"]
            if jaccard_similarity(code_a , code_b ) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a )
    return extremes
def find_extremes(cluster_list , dataset , jaccard_threshold ):
    """Run _find_cluster_extremes_shared over all clusters, sharing the dataset globally with the worker pool."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset(dataset , jaccard_threshold = 0.85 ):
    """Deduplicate a dataset, keeping one "extreme" file per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F'''Original dataset size: {len(dataset )}''' )
    print(F'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(F'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(F'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(F'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
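# Typical usage (hypothetical dataset; the column names must match those used above):
#   ds = Dataset.from_dict({"content": [...], "repo_name": [...], "path": [...]})
#   ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)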
| 62
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte value to a printable unicode character, reversibly."""
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
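# For instance, printable ASCII maps to itself (bytes_to_unicode()[ord("A")] == "A"),
# while unprintable bytes are shifted to code points >= 256, so every byte value gets
# a distinct, visible, and reversible character.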
def get_pairs(word ):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder )

    def get_vocab(self):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
    def _tokenize(self, text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
    def _convert_token_to_id(self, token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self, index ):
        return self.decoder.get(index )

    def convert_tokens_to_string(self, tokens ):
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary(self, save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
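    # The layouts built above follow the RoBERTa convention: a single sequence is
    # encoded as `<s> A </s>` and a pair as `<s> A </s></s> B </s>`.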
    def get_special_tokens_mask(self, token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization(self, text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def _pad(self, encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
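    # A minimal illustration (hypothetical values): right-padding a
    # global_attention_mask of [0, 1] to a length-4 input yields [0, 1, -1, -1];
    # the padded positions get `-1` because `0` already means local attention.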
| 335
| 0
|
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes , num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost] )
        adjancency[nodeb].append([nodea, cost] )
    result = mst(adjancency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
| 718
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
UpperCAmelCase_ = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/diffusers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 490
| 0
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase ):
    """Tests for the RealmRetriever."""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        realm_tokenizer_path = os.path.join(self.tmpdirname , """realm_tokenizer""" )
        os.makedirs(realm_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        realm_block_records_path = os.path.join(self.tmpdirname , """realm_block_records""" )
        os.makedirs(realm_block_records_path , exist_ok=True )
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) )
    def tearDown(self):
        shutil.rmtree(self.tmpdirname )
    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records )
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
[
b"""This is the first record""",
b"""This is the second record""",
b"""This is the third record""",
b"""This is the fourth record""",
b"""This is the fifth record""",
b"""This is a longer longer longer record""",
            ] , dtype=object , )
return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3] , dtype="""long""" )
        question_input_ids = tokenizer(["""Test question"""] ).input_ids
        answer_ids = tokenizer(
            ["""the fourth"""] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len

        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="""np""" )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5] , dtype="""long""" )
        question_input_ids = tokenizer(["""Test question"""] ).input_ids
        answer_ids = tokenizer(
            ["""the fourth""", """longer longer"""] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len

        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="""np""" )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
        self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
        # Test mocked remote path
        with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )
            self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
| 102
|
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '__DUMMY_TRANSFORMERS_USER__'
CI_HUB_USER_FULL_NAME = 'Dummy User'
CI_HUB_USER_TOKEN = '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'
CI_HUB_ENDPOINT = 'https://hub-ci.huggingface.co'
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
CI_HUB_TOKEN_PATH = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch ):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config(monkeypatch ):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT" , CI_HUB_ENDPOINT )
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , CI_HUB_DATASETS_URL )
@pytest.fixture
def ci_hub_token_path(monkeypatch ):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config , ci_hub_token_path ):
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session" )
def snake_case__ ( ):
"""simple docstring"""
return HfApi(endpoint=_snake_case )
@pytest.fixture(scope="session" )
def snake_case__ ( _snake_case : HfApi ):
"""simple docstring"""
UpperCamelCase__ = HfFolder.get_token()
HfFolder.save_token(_snake_case )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_snake_case )
@pytest.fixture
def cleanup_repo(hf_api ):
    def _cleanup_repo(repo_id ):
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type="dataset" )
    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo ):
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )
    return _temporary_repo
@pytest.fixture(scope="session" )
def snake_case__ ( _snake_case : HfApi , _snake_case : Tuple , _snake_case : Any ):
"""simple docstring"""
UpperCamelCase__ = F'repo_txt_data-{int(time.time() * 10E3 )}'
UpperCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_snake_case , token=_snake_case , repo_type="dataset" , private=_snake_case )
hf_api.upload_file(
token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo="data/text_data.txt" , repo_id=_snake_case , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_snake_case , token=_snake_case , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case__ ( _snake_case : int , _snake_case : Any , _snake_case : str ):
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def snake_case__ ( _snake_case : HfApi , _snake_case : Optional[int] , _snake_case : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = F'repo_zipped_txt_data-{int(time.time() * 10E3 )}'
UpperCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_snake_case , token=_snake_case , repo_type="dataset" , private=_snake_case )
hf_api.upload_file(
token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo="data.zip" , repo_id=_snake_case , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_snake_case , token=_snake_case , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case__ ( _snake_case : Optional[int] , _snake_case : List[str] , _snake_case : List[str] ):
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def snake_case__ ( _snake_case : HfApi , _snake_case : Optional[Any] , _snake_case : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = F'repo_zipped_img_data-{int(time.time() * 10E3 )}'
UpperCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_snake_case , token=_snake_case , repo_type="dataset" , private=_snake_case )
hf_api.upload_file(
token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo="data.zip" , repo_id=_snake_case , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_snake_case , token=_snake_case , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case__ ( _snake_case : str , _snake_case : str , _snake_case : Union[str, Any] ):
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
| 516
| 0
|
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module ):
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
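        # For scale (numbers quoted from the WikiText-103 Transformer-XL setup, as
        # an assumption about typical use, not taken from this file):
        # cutoffs=[20000, 40000, 200000] with n_token=267735 gives a head of 20000
        # shortlist tokens plus 3 cluster logits, and three tail clusters covering
        # the progressively rarer vocabulary.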
    def _compute_logit(self, hidden , weight , bias , proj ):
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden , labels=None , keep_order=False ):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -1_0_0
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out
    def log_prob(self, hidden ):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx , stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
| 706
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 427
| 0
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """A circular FIFO queue backed by a fixed ring of doubly linked nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('Empty Queue')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')


class Node:
    """Ring node holding a payload and links to its neighbours."""

    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
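# A small runnable check of the ring behaviour (names as reconstructed above):
# the queue reuses its pre-allocated nodes, so enqueue/dequeue never allocate.
def _demo_circular_queue() -> None:
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"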
if __name__ == "__main__":
import doctest
doctest.testmod()
| 383
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
| 343
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_mluke"""] = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 603
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """Unconditional image generation with DDIM sampling."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]; do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
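# Hedged usage sketch (the checkpoint id is illustrative; weights are downloaded
# on first use, and any UNet-only diffusion checkpoint should work):
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]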
| 603
| 1
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items by descending value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # highest value-per-unit-weight first
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
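# Worked check on the classic instance: values (60, 100, 120), weights
# (10, 20, 30), capacity 50 -> items 0 and 1 in full plus 2/3 of item 2, i.e. 240.
def _demo_fractional_knapsack() -> None:
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 20 / 30]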
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of small random PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
| 20
| 0
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
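# Example of the mapping in use (values as defined above): resolve an ONNX
# Runtime output type string, e.g. from `session.get_outputs()[0].type`,
# to the NumPy dtype used to post-process results.
# ORT_TO_NP_TYPE["tensor(float)"]  # -> np.float32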
class OnnxRuntimeModel:
    """Thin wrapper around an `onnxruntime.InferenceSession` with save/load helpers."""

    def __init__(self, model=None, **kwargs):
        logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''')
        self.model = model
        self.model_save_dir = kwargs.get('''model_save_dir''', None)
        self.latest_model_name = kwargs.get('''latest_model_name''', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''')
            provider = '''CPUExecutionProvider'''
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''')
            return
        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split('''@''')) == 2:
            model_id, revision = model_id.split('''@''')

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
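# Hedged usage sketch (the repo id and input name are illustrative placeholders,
# not values from this file; requires `onnxruntime` to be installed):
# model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", file_name="model.onnx")
# outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))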
| 719
|
def gray_code(bit_count):
    """Return the Gray code sequence for `bit_count` bits as integers."""
    if bit_count < 0:
        raise ValueError('''The given input must be positive''')

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert the bit strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count):
    """Build the sequence by reflect-and-prefix: prefix the (n-1)-bit sequence
    with "0", then the reversed (n-1)-bit sequence with "1"."""
    # base cases
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # length of the sequence; 1 << n is equivalent to 2^n

    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []

    # append 0 to the first half of the smaller sequence
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
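# Quick runnable check of the reflect-and-prefix construction: consecutive
# codes differ in exactly one bit.
def _demo_gray_code() -> None:
    assert gray_code(2) == [0, 1, 3, 2]
    codes = gray_code(3)
    assert all(bin(a ^ b).count("1") == 1 for a, b in zip(codes, codes[1:]))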
if __name__ == "__main__":
import doctest
doctest.testmod()
| 297
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def _lowerCAmelCase ( *_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _lowerCAmelCase ( self : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
SCREAMING_SNAKE_CASE : Dict = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def _lowerCAmelCase ( self : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = object_detector(examples[0] , threshold=0.0 )
SCREAMING_SNAKE_CASE : Optional[int] = len(_SCREAMING_SNAKE_CASE )
self.assertGreater(_SCREAMING_SNAKE_CASE , 0 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{
'score': ANY(_SCREAMING_SNAKE_CASE ),
'label': ANY(_SCREAMING_SNAKE_CASE ),
'box': {'xmin': ANY(_SCREAMING_SNAKE_CASE ), 'ymin': ANY(_SCREAMING_SNAKE_CASE ), 'xmax': ANY(_SCREAMING_SNAKE_CASE ), 'ymax': ANY(_SCREAMING_SNAKE_CASE )},
}
for i in range(_SCREAMING_SNAKE_CASE )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
SCREAMING_SNAKE_CASE : List[str] = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] , )
SCREAMING_SNAKE_CASE : Optional[Any] = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] , )
@require_torch
@slow
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE : List[str] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] , )
SCREAMING_SNAKE_CASE : str = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
@require_torch
@slow
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = 0.2
SCREAMING_SNAKE_CASE : List[Any] = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE : List[str] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=_SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] , )
@require_torch
@slow
def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : List[Any] = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE : List[str] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=_SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] , )
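# Hedged usage sketch of the pipeline exercised above (downloads the default
# checkpoint on first run; the image URL and labels mirror the test inputs):
# detector = pipeline("zero-shot-object-detection")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote", "couch"],
#     threshold=0.2,
# )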
| 265
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 265
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase__ , "width_multiplier" ) )
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Any=13 , lowerCamelCase__ : int=64 , lowerCamelCase__ : Any=2 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Optional[Any]="swish" , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : Any=32 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Union[str, Any]=0.0_2 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Dict=0.2_5 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : int=0.0 , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = parent
_UpperCAmelCase : List[str] = batch_size
_UpperCAmelCase : Union[str, Any] = image_size
_UpperCAmelCase : Optional[int] = patch_size
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : List[str] = make_divisible(5_12 * width_multiplier , divisor=8 )
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Optional[int] = conv_kernel_size
_UpperCAmelCase : List[Any] = output_stride
_UpperCAmelCase : List[Any] = classifier_dropout_prob
_UpperCAmelCase : Optional[Any] = use_labels
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : Optional[Any] = num_labels
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : Optional[int] = scope
_UpperCAmelCase : Dict = width_multiplier
_UpperCAmelCase : str = ffn_dropout
_UpperCAmelCase : Tuple = attn_dropout
def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : int = None
_UpperCAmelCase : Any = None
if self.use_labels:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = MobileViTVaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Optional[int] = MobileViTVaForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.num_labels
_UpperCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_UpperCAmelCase : List[Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Dict = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = config_and_inputs
_UpperCAmelCase : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Dict = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase : Optional[Any] = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : str = False
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MobileViTVaModelTester(self )
_UpperCAmelCase : List[Any] = MobileViTVaConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : str = model_class(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : List[str] = [*signature.parameters.keys()]
_UpperCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int ):
_UpperCAmelCase : str = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = outputs.hidden_states
_UpperCAmelCase : Tuple = 5
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_UpperCAmelCase : Optional[Any] = 2
for i in range(len(lowerCamelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : int = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : int = MobileViTVaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->Any:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Any = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
lowerCamelCase__ )
_UpperCAmelCase : str = self.default_image_processor
_UpperCAmelCase : str = prepare_img()
_UpperCAmelCase : Union[str, Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Optional[Any] = model(**lowerCamelCase__ )
# verify the logits
_UpperCAmelCase : List[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_UpperCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : Union[str, Any] = model.to(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
_UpperCAmelCase : List[Any] = outputs.logits
# verify the logits
_UpperCAmelCase : Optional[Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCamelCase__ )
_UpperCAmelCase : int = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=lowerCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : int = model.to(lowerCamelCase__ )
_UpperCAmelCase : Any = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : List[str] = prepare_img()
_UpperCAmelCase : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Optional[Any] = model(**lowerCamelCase__ )
_UpperCAmelCase : List[Any] = outputs.logits.detach().cpu()
_UpperCAmelCase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(50, 60)] )
_UpperCAmelCase : Tuple = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ )
_UpperCAmelCase : Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
| 40
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 40
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Map GroupViT checkpoint parameter names to their HF Transformers equivalents."""
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('''img_encoder.pos_embed''', '''vision_model.embeddings.position_embeddings''')
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('''img_encoder.patch_embed.proj''', '''vision_model.embeddings.patch_embeddings.projection''')
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('''img_encoder.patch_embed.norm''', '''vision_model.embeddings.layernorm''')
    if "img_encoder.layers" in name:
        name = name.replace('''img_encoder.layers''', '''vision_model.encoder.stages''')
    if "blocks" in name and "res" not in name:
        name = name.replace('''blocks''', '''layers''')
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('''attn''', '''self_attn''')
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('''proj''', '''out_proj''')
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('''pre_assign_attn.attn.proj''', '''pre_assign_attn.attn.out_proj''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layer_norm1''')
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('''norm2''', '''layer_norm2''')
    if "img_encoder.norm" in name:
        name = name.replace('''img_encoder.norm''', '''vision_model.layernorm''')
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('''text_encoder.token_embedding''', '''text_model.embeddings.token_embedding''')
    if "text_encoder.positional_embedding" in name:
        name = name.replace('''text_encoder.positional_embedding''', '''text_model.embeddings.position_embedding.weight''')
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('''text_encoder.transformer.resblocks.''', '''text_model.encoder.layers.''')
    if "ln_1" in name:
        name = name.replace('''ln_1''', '''layer_norm1''')
    if "ln_2" in name:
        name = name.replace('''ln_2''', '''layer_norm2''')
    if "c_fc" in name:
        name = name.replace('''c_fc''', '''fc1''')
    if "c_proj" in name:
        name = name.replace('''c_proj''', '''fc2''')
    if "text_encoder" in name:
        name = name.replace('''text_encoder''', '''text_model''')
    if "ln_final" in name:
        name = name.replace('''ln_final''', '''final_layer_norm''')
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('''img_projector.linear_hidden.''', '''visual_projection.''')
    if "img_projector.linear_out." in name:
        name = name.replace('''img_projector.linear_out.''', '''visual_projection.3.''')
    if "text_projector.linear_hidden" in name:
        name = name.replace('''text_projector.linear_hidden''', '''text_projection''')
    if "text_projector.linear_out" in name:
        name = name.replace('''text_projector.linear_out''', '''text_projection.3''')
    return name
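# Quick runnable check of the mapping on a hypothetical checkpoint key:
def _demo_rename_key() -> None:
    assert (
        rename_key("img_encoder.layers.0.blocks.1.attn.qkv")
        == "vision_model.encoder.stages.0.layers.1.self_attn.qkv"
    )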
def lowerCAmelCase (__A , __A):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_a = orig_state_dict.pop(__A)
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_a = key.split('''.''')
_a , _a = int(key_split[2]), int(key_split[4])
_a = config.vision_config.hidden_size
if "weight" in key:
_a = val[:dim, :]
_a = val[dim : dim * 2, :]
_a = val[-dim:, :]
else:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_a = key.split('''.''')
_a = int(key_split[3])
_a = config.text_config.hidden_size
if "weight" in key:
_a = val[:dim, :]
_a = val[
dim : dim * 2, :
]
_a = val[-dim:, :]
else:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
else:
_a = rename_key(__A)
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_a = val.squeeze_()
else:
_a = val
return orig_state_dict
def lowerCAmelCase ():
"""simple docstring"""
_a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a = Image.open(requests.get(__A , stream=__A).raw)
return im
@torch.no_grad()
def lowerCAmelCase (__A , __A , __A="groupvit-gcc-yfcc" , __A=False):
"""simple docstring"""
_a = GroupViTConfig()
_a = GroupViTModel(__A).eval()
_a = torch.load(__A , map_location='''cpu''')['''model''']
_a = convert_state_dict(__A , __A)
_a , _a = model.load_state_dict(__A , strict=__A)
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__A) == 0)
# verify result
_a = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
_a = prepare_img()
_a = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=__A , padding=__A , return_tensors='''pt''')
with torch.no_grad():
_a = model(**__A)
if model_name == "groupvit-gcc-yfcc":
_a = torch.tensor([[13.35_23, 6.36_29]])
elif model_name == "groupvit-gcc-redcaps":
_a = torch.tensor([[16.18_73, 8.62_30]])
else:
raise ValueError(F'''Model name {model_name} not supported.''')
assert torch.allclose(outputs.logits_per_image , __A , atol=1e-3)
processor.save_pretrained(__A)
model.save_pretrained(__A)
print('''Successfully saved processor and model to''' , __A)
if push_to_hub:
print('''Pushing to the hub...''')
processor.push_to_hub(__A , organization='''nielsr''')
model.push_to_hub(__A , organization='''nielsr''')
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
lowercase_ = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 11
|
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase_ = 10
lowercase_ = 256
def lowerCAmelCase (__A):
"""simple docstring"""
if len(__A) < MIN_NUM_TOKENS:
return None
_a = MinHash(num_perm=__A)
for token in set(__A):
min_hash.update(token.encode())
return min_hash
def lowerCAmelCase (__A):
"""simple docstring"""
return {t for t in NON_ALPHA.split(__A) if len(t.strip()) > 0}
class __A :
'''simple docstring'''
def __init__(self , *,
A = 0.85 , ) -> Optional[int]:
"""simple docstring"""
_a = duplication_jaccard_threshold
_a = NUM_PERM
_a = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_a = defaultdict(A )
def a__ (self , A , A ) -> None:
"""simple docstring"""
_a = self._index.query(A )
if code_key in self._index.keys:
print(f'''Duplicate key {code_key}''' )
return
self._index.insert(A , A )
if len(A ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(A )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(A )
def a__ (self ) -> List[List[Dict]]:
"""simple docstring"""
_a = []
for base, duplicates in self._duplicate_clusters.items():
_a = [base] + list(A )
# reformat the cluster to be a list of dict
_a = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(A )
return duplicate_clusters
def a__ (self , A ) -> None:
"""simple docstring"""
_a = self.get_duplicate_clusters()
with open(A , '''w''' ) as f:
json.dump(A , A )
def lowerCAmelCase (__A):
"""simple docstring"""
_a , _a = element
_a = get_min_hash([t for t in NON_ALPHA.split(data['''content''']) if len(t.strip()) > 0])
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCAmelCase (__A):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__A , max_queue_size=10_000) , chunksize=100 , ):
if data is not None:
yield data
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = DuplicationIndex(duplication_jaccard_threshold=__A)
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__A)) , max_queue_size=100)):
di.add(__A , __A)
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = get_tokens(__A)
_a = get_tokens(__A)
return len(tokensa & tokensa) / len(tokensa | tokensa)
lowercase_ = None
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = []
for elementa in cluster:
_a = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
_a = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(__A , __A) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_a = 1
extremes.append(__A)
return extremes
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
global _shared_dataset
_a = dataset
_a = []
_a = partial(_find_cluster_extremes_shared , jaccard_threshold=__A)
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__A , __A , ) , total=len(__A) , ):
extremes_list.append(__A)
return extremes_list
def lowerCAmelCase (__A , __A = 0.85):
"""simple docstring"""
_a = make_duplicate_clusters(__A , __A)
_a = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
_a = {}
_a = find_extremes(__A , __A , __A)
for extremes in extremes_clusters:
for element in extremes:
_a = element
_a = duplicate_indices - set(extreme_dict.keys())
_a = dataset.filter(lambda __A , __A: idx not in remove_indices , with_indices=__A)
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_a = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
_a = extreme_dict[element['''base_index''']]['''copies''']
print(F'''Original dataset size: {len(__A)}''')
print(F'''Number of duplicate clusters: {len(__A)}''')
print(F'''Files in duplicate cluster: {len(__A)}''')
print(F'''Unique files in duplicate cluster: {len(__A)}''')
print(F'''Filtered dataset size: {len(__A)}''')
return ds_filter, duplicate_clusters
| 11
| 1
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : str=sys.maxsize ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[Any] = 'bilinear'
_lowercase : str = max_size
_lowercase : List[str] = short_edge_length
def __call__( self : Optional[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Dict = []
for img in imgs:
_lowercase , _lowercase : Optional[Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
_lowercase : Tuple = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_lowercase : List[Any] = size * 1.0 / min(_lowercase , _lowercase )
if h < w:
_lowercase , _lowercase : str = size, scale * w
else:
_lowercase , _lowercase : Optional[Any] = scale * h, size
if max(_lowercase , _lowercase ) > self.max_size:
_lowercase : Any = self.max_size * 1.0 / max(_lowercase , _lowercase )
_lowercase : str = newh * scale
_lowercase : Dict = neww * scale
_lowercase : Union[str, Any] = int(neww + 0.5 )
_lowercase : str = int(newh + 0.5 )
if img.dtype == np.uinta:
_lowercase : Tuple = Image.fromarray(_lowercase )
_lowercase : Tuple = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_lowercase : Optional[Any] = np.asarray(_lowercase )
else:
_lowercase : Optional[int] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_lowercase : List[str] = nn.functional.interpolate(
_lowercase , (newh, neww) , mode=self.interp_method , align_corners=_lowercase ).squeeze(0 )
img_augs.append(_lowercase )
return img_augs
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase_ : int ) -> str:
'''simple docstring'''
_lowercase : str = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_lowercase : Optional[int] = cfg.INPUT.FORMAT
_lowercase : Union[str, Any] = cfg.SIZE_DIVISIBILITY
_lowercase : int = cfg.PAD_VALUE
_lowercase : Tuple = cfg.INPUT.MAX_SIZE_TEST
_lowercase : Union[str, Any] = cfg.MODEL.DEVICE
_lowercase : str = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_lowercase : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_lowercase : Tuple = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = tuple(max(_lowercase ) for s in zip(*[img.shape for img in images] ) )
_lowercase : int = [im.shape[-2:] for im in images]
_lowercase : int = [
nn.functional.pad(
_lowercase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_lowercase , _lowercase )
]
return torch.stack(_lowercase ), torch.tensor(_lowercase )
def __call__( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=False ) -> str:
'''simple docstring'''
with torch.no_grad():
if not isinstance(_lowercase , _lowercase ):
_lowercase : Optional[Any] = [images]
if single_image:
assert len(_lowercase ) == 1
for i in range(len(_lowercase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(_lowercase , images.pop(_lowercase ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
_lowercase , torch.as_tensor(img_tensorize(images.pop(_lowercase ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_lowercase : List[Any] = torch.tensor([im.shape[:2] for im in images] )
_lowercase : Optional[Any] = self.aug(_lowercase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_lowercase : Tuple = [self.normalizer(_lowercase ) for x in images]
# now pad them to do the following operations
_lowercase , _lowercase : Any = self.pad(_lowercase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_lowercase : List[str] = torch.true_divide(_lowercase , _lowercase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def __UpperCamelCase ( _lowercase, _lowercase ) -> Dict:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def __UpperCamelCase ( _lowercase, _lowercase ) -> Dict:
assert torch.isfinite(UpperCamelCase__ ).all(), "Box tensor contains infinite or NaN!"
_lowercase , _lowercase : str = box_size
tensor[:, 0].clamp_(min=0, max=UpperCamelCase__ )
tensor[:, 1].clamp_(min=0, max=UpperCamelCase__ )
tensor[:, 2].clamp_(min=0, max=UpperCamelCase__ )
tensor[:, 3].clamp_(min=0, max=UpperCamelCase__ )
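# Hedged usage sketch (not part of the original file): the two helpers above
# rescale xyxy boxes back to the original resolution and clamp them to the
# image bounds. All names below (boxes, scales_yx, h, w) are illustrative.
import torch

boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])  # (x1, y1, x2, y2)
scales_yx = torch.tensor([[2.0, 0.5]])  # per-image (scale_y, scale_x)
boxes[:, 0::2] *= scales_yx[:, 1]  # scale x coordinates
boxes[:, 1::2] *= scales_yx[:, 0]  # scale y coordinates
h, w = 300, 200
boxes[:, 0].clamp_(min=0, max=w)
boxes[:, 1].clamp_(min=0, max=h)
boxes[:, 2].clamp_(min=0, max=w)
boxes[:, 3].clamp_(min=0, max=h)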
| 702
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            _lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
            _lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.float64 )
        elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            _lowercase : Tuple = raw_speech.astype(np.float64 )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
            _lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.float64 ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
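# Hedged sketch (illustrative, not part of the original file): the "repeatpad"
# branch above tiles a short waveform n_repeat times and then zero-pads the
# remainder up to max_length. A minimal numpy equivalent:
import numpy as np

waveform = np.arange(5, dtype=np.float64)
max_length = 12
n_repeat = int(max_length / len(waveform))  # 2
tiled = np.tile(waveform, n_repeat)  # length 10
padded = np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)
assert padded.shape[0] == max_length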
| 4
| 0
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
if os.path.exists(_lowerCamelCase ):
if os.path.exists(os.path.join(_lowerCamelCase , "config.json" ) ) and os.path.isfile(
os.path.join(_lowerCamelCase , "config.json" ) ):
os.remove(os.path.join(_lowerCamelCase , "config.json" ) )
if os.path.exists(os.path.join(_lowerCamelCase , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(_lowerCamelCase , "pytorch_model.bin" ) ):
os.remove(os.path.join(_lowerCamelCase , "pytorch_model.bin" ) )
else:
os.makedirs(_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> str:
'''simple docstring'''
_lowerCamelCase : List[str] = 2
if unlogit:
_lowerCamelCase : List[Any] = torch.pow(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Dict = p * torch.log(_lowerCamelCase )
_lowerCamelCase : List[Any] = 0
return -plogp.sum(dim=-1 )
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(_lowerCamelCase ) ) ) )
for row in range(len(_lowerCamelCase ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=False ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Any = model.config.num_hidden_layers, model.config.num_attention_heads
_lowerCamelCase : Optional[Any] = torch.zeros(_lowerCamelCase , _lowerCamelCase ).to(args.device )
_lowerCamelCase : Tuple = torch.zeros(_lowerCamelCase , _lowerCamelCase ).to(args.device )
if head_mask is None:
_lowerCamelCase : Optional[int] = torch.ones(_lowerCamelCase , _lowerCamelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=_lowerCamelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_lowerCamelCase : str = None
_lowerCamelCase : str = 0.0
_lowerCamelCase : Any = 0.0
for step, inputs in enumerate(tqdm(_lowerCamelCase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
_lowerCamelCase : List[str] = tuple(t.to(args.device ) for t in inputs )
(_lowerCamelCase ) : int = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_lowerCamelCase : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase , head_mask=_lowerCamelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_lowerCamelCase : List[str] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_lowerCamelCase ):
_lowerCamelCase : int = entropy(attn.detach() , _lowerCamelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_lowerCamelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_lowerCamelCase : Any = 2
_lowerCamelCase : Dict = torch.pow(torch.pow(_lowerCamelCase , _lowerCamelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
_lowerCamelCase : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(_lowerCamelCase )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(_lowerCamelCase )
logger.info("Head ranked by importance scores" )
_lowerCamelCase : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_lowerCamelCase : Optional[int] = torch.arange(
head_importance.numel() , device=args.device )
_lowerCamelCase : Optional[Any] = head_ranks.view_as(_lowerCamelCase )
print_ad_tensor(_lowerCamelCase )
return attn_entropy, head_importance, total_loss
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : List[Any] = compute_heads_importance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase )
_lowerCamelCase : int = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , _lowerCamelCase , original_score * args.masking_threshold )
_lowerCamelCase : List[str] = torch.ones_like(_lowerCamelCase )
_lowerCamelCase : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_lowerCamelCase : Dict = original_score
while current_score >= original_score * args.masking_threshold:
_lowerCamelCase : Any = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_lowerCamelCase : Tuple = float("Inf" )
_lowerCamelCase : Dict = head_importance.view(-1 ).sort()[1]
if len(_lowerCamelCase ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
_lowerCamelCase : List[str] = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
_lowerCamelCase : Tuple = new_head_mask.view(-1 )
_lowerCamelCase : str = 0.0
_lowerCamelCase : Any = new_head_mask.view_as(_lowerCamelCase )
_lowerCamelCase : List[str] = new_head_mask.clone().detach()
print_ad_tensor(_lowerCamelCase )
# Compute metric and head importance again
_lowerCamelCase : Dict = compute_heads_importance(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , head_mask=_lowerCamelCase )
_lowerCamelCase : Tuple = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , _lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(_lowerCamelCase )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = datetime.now()
_lowerCamelCase : List[Any] = compute_heads_importance(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , compute_importance=_lowerCamelCase , head_mask=_lowerCamelCase )
_lowerCamelCase : Any = 1 / loss
_lowerCamelCase : Union[str, Any] = datetime.now() - before_time
_lowerCamelCase : Any = sum(p.numel() for p in model.parameters() )
_lowerCamelCase : int = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_lowerCamelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Dict = [
v,
]
assert sum(len(_lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_lowerCamelCase )
_lowerCamelCase : List[Any] = sum(p.numel() for p in model.parameters() )
_lowerCamelCase : Any = datetime.now()
_lowerCamelCase : List[Any] = compute_heads_importance(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , compute_importance=_lowerCamelCase , head_mask=_lowerCamelCase , actually_pruned=_lowerCamelCase , )
_lowerCamelCase : Optional[int] = 1 / loss
_lowerCamelCase : Any = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , _lowerCamelCase , _lowerCamelCase , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , _lowerCamelCase , _lowerCamelCase )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(_lowerCamelCase , args.output_dir )
def lowerCamelCase_( ) -> Dict:
'''simple docstring'''
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=_lowerCamelCase , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=_lowerCamelCase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=_lowerCamelCase , type=_lowerCamelCase , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=_lowerCamelCase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
    parser.add_argument(
        "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
    parser.add_argument(
        "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=_lowerCamelCase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=_lowerCamelCase , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=_lowerCamelCase , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=_lowerCamelCase , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=_lowerCamelCase , help="Batch size." )
parser.add_argument("--seed" , type=_lowerCamelCase , default=42 )
parser.add_argument("--local_rank" , type=_lowerCamelCase , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=_lowerCamelCase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=_lowerCamelCase , default="" , help="Can be used for distant debugging." )
_lowerCamelCase : Tuple = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowerCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_lowerCamelCase : str = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
_lowerCamelCase : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_lowerCamelCase : Dict = torch.device("cuda" , args.local_rank )
_lowerCamelCase : Optional[Any] = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    _lowerCamelCase : List[Any] = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_lowerCamelCase : Tuple = nn.parallel.DistributedDataParallel(
_lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_lowerCamelCase )
elif args.n_gpu > 1:
_lowerCamelCase : Optional[Any] = nn.DataParallel(_lowerCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_lowerCamelCase )
torch.save(_lowerCamelCase , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , _lowerCamelCase )
# Prepare dataset
_lowerCamelCase : Any = np.concatenate(
[
            np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
_lowerCamelCase : int = (torch.from_numpy(_lowerCamelCase ),)
_lowerCamelCase : int = TensorDataset(*_lowerCamelCase )
_lowerCamelCase : List[str] = RandomSampler(_lowerCamelCase )
_lowerCamelCase : List[str] = DataLoader(_lowerCamelCase , sampler=_lowerCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_lowerCamelCase : int = mask_heads(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
prune_heads(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
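# Hedged sketch (illustrative, not part of the original script): the entropy
# helper above computes -sum(p * log p) over the last axis of an attention
# map, treating 0 * log(0) as 0. A standalone version for reference:
import torch

def attention_entropy(p: torch.Tensor) -> torch.Tensor:
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0 * log(0) = 0
    return -plogp.sum(dim=-1)

uniform = torch.full((1, 4), 0.25)
print(attention_entropy(uniform))  # tensor([1.3863]), i.e. log(4)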
| 46
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_UpperCamelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class snake_case__ ( nn.Module):
def __init__( self : Optional[int] , _A : Tuple ) -> List[str]:
super().__init__()
        UpperCAmelCase_ : Tuple = torchvision.models.resnet152(pretrained=_A )
UpperCAmelCase_ : Union[str, Any] = list(model.children() )[:-2]
UpperCAmelCase_ : Union[str, Any] = nn.Sequential(*_A )
        UpperCAmelCase_ : List[Any] = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def A ( self : str , _A : Optional[int] ) -> str:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
UpperCAmelCase_ : List[str] = self.pool(self.model(_A ) )
UpperCAmelCase_ : Tuple = torch.flatten(_A , start_dim=2 )
UpperCAmelCase_ : Union[str, Any] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class snake_case__ ( UpperCamelCase):
def __init__( self : Optional[int] , _A : int , _A : str , _A : int , _A : Dict , _A : int ) -> List[str]:
UpperCAmelCase_ : Any = [json.loads(_A ) for l in open(_A )]
UpperCAmelCase_ : Tuple = os.path.dirname(_A )
UpperCAmelCase_ : Any = tokenizer
UpperCAmelCase_ : Optional[Any] = labels
UpperCAmelCase_ : List[str] = len(_A )
UpperCAmelCase_ : int = max_seq_length
UpperCAmelCase_ : str = transforms
def __len__( self : Tuple ) -> Tuple:
return len(self.data )
def __getitem__( self : Union[str, Any] , _A : Tuple ) -> List[Any]:
UpperCAmelCase_ : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=_A ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = sentence[0], sentence[1:-1], sentence[-1]
UpperCAmelCase_ : List[str] = sentence[: self.max_seq_length]
UpperCAmelCase_ : List[str] = torch.zeros(self.n_classes )
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : Tuple = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' )
UpperCAmelCase_ : List[Any] = self.transforms(_A )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def A ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def __UpperCAmelCase ( A : Tuple ) -> Union[str, Any]:
UpperCAmelCase_ : int = [len(row['''sentence'''] ) for row in batch]
UpperCAmelCase_ , UpperCAmelCase_ : Dict = len(A ), max(A )
UpperCAmelCase_ : Tuple = torch.zeros(A , A , dtype=torch.long )
UpperCAmelCase_ : Dict = torch.zeros(A , A , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(A , A ) ):
UpperCAmelCase_ : int = input_row['''sentence''']
UpperCAmelCase_ : Optional[Any] = 1
UpperCAmelCase_ : Union[str, Any] = torch.stack([row['''image'''] for row in batch] )
UpperCAmelCase_ : Optional[int] = torch.stack([row['''label'''] for row in batch] )
UpperCAmelCase_ : Any = torch.stack([row['''image_start_token'''] for row in batch] )
UpperCAmelCase_ : int = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def __UpperCAmelCase ( ) -> Union[str, Any]:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def __UpperCAmelCase ( ) -> Union[str, Any]:
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
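# Hedged sketch (illustrative, not part of the original file): the collate
# function above left-aligns variable-length token rows into a zero-padded
# LongTensor plus a matching attention mask. Minimal equivalent:
import torch

sentences = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
lengths = [len(s) for s in sentences]
bsz, max_len = len(lengths), max(lengths)
text_tensor = torch.zeros(bsz, max_len, dtype=torch.long)
mask_tensor = torch.zeros(bsz, max_len, dtype=torch.long)
for i, (row, length) in enumerate(zip(sentences, lengths)):
    text_tensor[i, :length] = row
    mask_tensor[i, :length] = 1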
| 541
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Any = '''vivit'''
def __init__( self , _A=224 , _A=32 , _A=[2, 16, 16] , _A=3 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu_fast" , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1e-0_6 , _A=True , **_A , ):
__A : List[str] = hidden_size
__A : Tuple = num_hidden_layers
__A : Any = num_attention_heads
__A : str = intermediate_size
__A : Union[str, Any] = hidden_act
__A : int = hidden_dropout_prob
__A : Dict = attention_probs_dropout_prob
__A : Union[str, Any] = initializer_range
__A : List[Any] = layer_norm_eps
__A : int = image_size
__A : Optional[int] = num_frames
__A : Optional[Any] = tubelet_size
__A : Union[str, Any] = num_channels
__A : Union[str, Any] = qkv_bias
super().__init__(**_A )
| 77
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : str = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = '''codegen'''
UpperCamelCase : List[str] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _A=50400 , _A=2048 , _A=2048 , _A=4096 , _A=28 , _A=16 , _A=64 , _A=None , _A="gelu_new" , _A=0.0 , _A=0.0 , _A=0.0 , _A=1e-5 , _A=0.0_2 , _A=True , _A=50256 , _A=50256 , _A=False , **_A , ):
__A : Any = vocab_size
__A : Tuple = n_ctx
__A : Union[str, Any] = n_positions
__A : Optional[Any] = n_embd
__A : Any = n_layer
__A : Dict = n_head
__A : Union[str, Any] = n_inner
__A : List[Any] = rotary_dim
__A : str = activation_function
__A : Any = resid_pdrop
__A : Tuple = embd_pdrop
__A : Tuple = attn_pdrop
__A : Union[str, Any] = layer_norm_epsilon
__A : str = initializer_range
__A : Optional[Any] = use_cache
__A : Union[str, Any] = bos_token_id
__A : Tuple = eos_token_id
super().__init__(
bos_token_id=_A , eos_token_id=_A , tie_word_embeddings=_A , **_A )
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A = "default" , _A = None , _A = False , ):
super().__init__(_A , task=_A , patching_specs=_A , use_past=_A )
if not getattr(self._config , 'pad_token_id' , _A ):
# TODO: how to do that better?
__A : Dict = 0
@property
def UpperCAmelCase_ ( self ):
__A : List[str] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
__A : Tuple = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__A : int = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCAmelCase_ ( self ):
return self._config.n_layer
@property
def UpperCAmelCase_ ( self ):
return self._config.n_head
def UpperCAmelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
__A : Any = super(_A , self ).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
# We need to order the input in the way they appears in the forward()
__A : str = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__A , __A : Any = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__A : Any = seqlen + 2
__A : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__A : Optional[Any] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
__A : Tuple = common_inputs['attention_mask']
if self.use_past:
__A : str = ordered_inputs['attention_mask'].dtype
__A : List[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase_ ( self ):
return 13
| 77
| 1
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class a_ ( A__ ):
lowercase_ : Dict = ['''image_processor''', '''tokenizer''']
lowercase_ : str = '''BlipImageProcessor'''
lowercase_ : Any = '''AutoTokenizer'''
def __init__( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ):
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
# add QFormer tokenizer
__snake_case = qformer_tokenizer
def __call__( self : Tuple , __lowerCAmelCase : ImageInput = None , __lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : str , ):
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
__snake_case = BatchFeature()
if text is not None:
__snake_case = self.tokenizer(
text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
encoding.update(__lowerCAmelCase )
__snake_case = self.qformer_tokenizer(
text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
__snake_case = qformer_text_encoding.pop('input_ids' )
__snake_case = qformer_text_encoding.pop('attention_mask' )
if images is not None:
__snake_case = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase )
encoding.update(__lowerCAmelCase )
return encoding
def lowercase__ ( self : List[Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Optional[Any] ):
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def lowercase__ ( self : Dict , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ):
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowercase__ ( self : List[str] ):
__snake_case = self.tokenizer.model_input_names
__snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : str ):
if os.path.isfile(__lowerCAmelCase ):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
__snake_case = os.path.join(__lowerCAmelCase , 'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(__lowerCAmelCase )
return super().save_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def lowercase__ ( cls : str , __lowerCAmelCase : List[str] , **__lowerCAmelCase : Dict ):
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , subfolder='qformer_tokenizer' )
__snake_case = cls._get_arguments_from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
args.append(__lowerCAmelCase )
return cls(*__lowerCAmelCase )
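# Hedged usage sketch (commented out; assumes the upstream InstructBLIP
# checkpoint name, which is not given in this file):
# from PIL import Image
# from transformers import InstructBlipProcessor
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
# inputs = processor(images=Image.open("photo.png"), text="Describe the image.", return_tensors="pt")
# # -> pixel_values, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask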
| 356
|
"""simple docstring"""
__lowerCamelCase = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def a ( __snake_case : dict, __snake_case : str, __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ :List[Any] = set()
# keep track of all the paths to be checked
UpperCAmelCase_ :Any = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
UpperCAmelCase_ :Tuple = queue.pop(0 )
# get the last node from the path
UpperCAmelCase_ :str = path[-1]
if node not in explored:
UpperCAmelCase_ :Any = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
UpperCAmelCase_ :Union[str, Any] = list(__snake_case )
new_path.append(__snake_case )
queue.append(__snake_case )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(__snake_case )
# in case there's no path between the 2 nodes
return []
def a ( __snake_case : dict, __snake_case : int, __snake_case : str ):
'''simple docstring'''
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
UpperCAmelCase_ :Optional[Any] = [start]
UpperCAmelCase_ :str = set(__snake_case )
# Keep tab on distances from `start` node.
UpperCAmelCase_ :Optional[Any] = {start: 0, target: -1}
while queue:
UpperCAmelCase_ :Optional[Any] = queue.pop(0 )
if node == target:
UpperCAmelCase_ :str = (
dist[node] if dist[target] == -1 else min(dist[target], dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(__snake_case )
queue.append(__snake_case )
UpperCAmelCase_ :Optional[int] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 608
| 0
|
import numpy as np
import datasets
_UpperCamelCase: List[Any] ='\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_UpperCamelCase: Dict ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_UpperCamelCase: int ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'X': datasets.Sequence(datasets.Value('float' , id='sequence' ) , id='X' ),
} ) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Tuple:
# convert to numpy arrays
_lowerCAmelCase = np.array(lowercase_ )
_lowerCAmelCase = np.array(lowercase_ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('Expected `X` to be a 2D vector' )
if len(reference_distribution.shape ) != 2:
raise ValueError('Expected `reference_distribution` to be a 2D vector' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' )
# Get mahalanobis distance for each prediction
_lowerCAmelCase = X - np.mean(lowercase_ )
_lowerCAmelCase = np.cov(reference_distribution.T )
try:
_lowerCAmelCase = np.linalg.inv(lowercase_ )
except np.linalg.LinAlgError:
_lowerCAmelCase = np.linalg.pinv(lowercase_ )
_lowerCAmelCase = np.dot(lowercase_ , lowercase_ )
_lowerCAmelCase = np.dot(lowercase_ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 704
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def _a ( __SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] ):
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for rt in rc.restypes:
_lowerCAmelCase = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
_lowerCAmelCase = {name: i for i, name in enumerate(__SCREAMING_SNAKE_CASE )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
    _lowerCAmelCase = torch.tensor(
        __SCREAMING_SNAKE_CASE , dtype=torch.int32 , device=protein['aatype'].device , )
    _lowerCAmelCase = torch.tensor(
        __SCREAMING_SNAKE_CASE , dtype=torch.int32 , device=protein['aatype'].device , )
    _lowerCAmelCase = torch.tensor(
        __SCREAMING_SNAKE_CASE , dtype=torch.float32 , device=protein['aatype'].device , )
_lowerCAmelCase = protein['aatype'].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
_lowerCAmelCase = restype_atomaa_to_atomaa[protein_aatype]
_lowerCAmelCase = restype_atomaa_mask[protein_aatype]
_lowerCAmelCase = residx_atomaa_mask
_lowerCAmelCase = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
_lowerCAmelCase = restype_atomaa_to_atomaa[protein_aatype]
_lowerCAmelCase = residx_atomaa_to_atomaa.long()
# create the corresponding mask
    _lowerCAmelCase = torch.zeros([21, 37] , dtype=torch.float32 , device=protein['aatype'].device )
for restype, restype_letter in enumerate(rc.restypes ):
_lowerCAmelCase = rc.restype_atoa[restype_letter]
_lowerCAmelCase = rc.residue_atoms[restype_name]
for atom_name in atom_names:
_lowerCAmelCase = rc.atom_order[atom_name]
_lowerCAmelCase = 1
_lowerCAmelCase = restype_atomaa_mask[protein_aatype]
_lowerCAmelCase = residx_atomaa_mask
return protein
def _a ( __SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] ):
"""simple docstring"""
_lowerCAmelCase = tree_map(lambda __SCREAMING_SNAKE_CASE : torch.tensor(__SCREAMING_SNAKE_CASE , device=batch['aatype'].device ) , __SCREAMING_SNAKE_CASE , np.ndarray )
_lowerCAmelCase = tensor_tree_map(lambda __SCREAMING_SNAKE_CASE : np.array(__SCREAMING_SNAKE_CASE ) , make_atomaa_masks(__SCREAMING_SNAKE_CASE ) )
return out
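# Hedged sketch (illustrative, not part of the original file): the per-residue
# index tensors built above act as gather maps between the atom14 and atom37
# layouts. A toy single-residue version (indices below are made up):
import torch

atom37_positions = torch.arange(37, dtype=torch.float32)
atom14_to_atom37 = torch.tensor([0, 1, 2, 4, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0])
atom14_positions = atom37_positions[atom14_to_atom37]  # shape (14,)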
| 585
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Dict = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''mobilenet_v1'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=2_2_4 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE="relu6" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.999 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.001 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
snake_case__ : Optional[Any] = num_channels
snake_case__ : Tuple = image_size
snake_case__ : str = depth_multiplier
snake_case__ : Tuple = min_depth
snake_case__ : List[Any] = hidden_act
snake_case__ : int = tf_padding
snake_case__ : Tuple = classifier_dropout_prob
snake_case__ : Optional[Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def __UpperCamelCase ( self ):
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def __UpperCamelCase ( self ):
return 1e-4
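# Hedged usage sketch (commented out; assumes the transformers
# MobileNetV1Config / MobileNetV1Model API):
# from transformers import MobileNetV1Config, MobileNetV1Model
# config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
# model = MobileNetV1Model(config)  # randomly initialized weights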
| 38
|
class A__ :
"""simple docstring"""
def __init__( self , __snake_case ):
snake_case = n
snake_case = [None] * self.n
snake_case = 0 # index of the first element
snake_case = 0
snake_case = 0
def __len__( self ):
return self.size
def a_ ( self ):
return self.size == 0
def a_ ( self ):
return False if self.is_empty() else self.array[self.front]
def a_ ( self , __snake_case ):
if self.size >= self.n:
raise Exception('''QUEUE IS FULL''' )
snake_case = data
snake_case = (self.rear + 1) % self.n
self.size += 1
return self
def a_ ( self ):
if self.size == 0:
raise Exception('''UNDERFLOW''' )
snake_case = self.array[self.front]
snake_case = None
snake_case = (self.front + 1) % self.n
self.size -= 1
return temp
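# Hedged usage sketch (commented out; assumes the de-obfuscated names
# CircularQueue / enqueue / dequeue / first, which this file mangles):
# queue = CircularQueue(3)
# queue.enqueue(1).enqueue(2)
# assert queue.first() == 1 and len(queue) == 2
# assert queue.dequeue() == 1 and queue.first() == 2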
| 550
| 0
|
def _A ( lowerCAmelCase_ : list ):
"""simple docstring"""
if len(lowerCAmelCase_ ) <= 1:
return [tuple(lowerCAmelCase_ )]
lowerCAmelCase__ = []
def generate(lowerCAmelCase_ : int , lowerCAmelCase_ : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowerCAmelCase_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
lowerCAmelCase__ , lowerCAmelCase__ = arr[k - 1], arr[i]
else: # k is odd
lowerCAmelCase__ , lowerCAmelCase__ = arr[k - 1], arr[0]
generate(k - 1 , lowerCAmelCase_ )
generate(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
return res
if __name__ == "__main__":
UpperCamelCase = input('Enter numbers separated by a comma:\n').strip()
UpperCamelCase = [int(item) for item in user_input.split(',')]
print(heaps(arr))
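# Hedged check (commented out; assumes the de-obfuscated function name heaps,
# which the demo above also uses): Heap's algorithm should emit exactly n!
# distinct permutations.
# import itertools
# out = heaps([1, 2, 3])
# assert len(out) == 6 and set(out) == set(itertools.permutations([1, 2, 3]))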
| 125
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = 42
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : Tuple[str] = ("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE__ : Tuple[str] = ("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE__ : Tuple[int] = (64,) , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "silu" , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : int = 256 , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : float = 0.18_215 , SCREAMING_SNAKE_CASE__ : str = "group" , ) -> Optional[int]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase__ = Encoder(
in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , down_block_types=SCREAMING_SNAKE_CASE__ , block_out_channels=SCREAMING_SNAKE_CASE__ , layers_per_block=SCREAMING_SNAKE_CASE__ , act_fn=SCREAMING_SNAKE_CASE__ , norm_num_groups=SCREAMING_SNAKE_CASE__ , double_z=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        lowerCAmelCase__ = nn.Conv2d(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
lowerCAmelCase__ = VectorQuantizer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , beta=0.25 , remap=SCREAMING_SNAKE_CASE__ , sane_index_shape=SCREAMING_SNAKE_CASE__ )
        lowerCAmelCase__ = nn.Conv2d(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
# pass init params to Decoder
lowerCAmelCase__ = Decoder(
in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , up_block_types=SCREAMING_SNAKE_CASE__ , block_out_channels=SCREAMING_SNAKE_CASE__ , layers_per_block=SCREAMING_SNAKE_CASE__ , act_fn=SCREAMING_SNAKE_CASE__ , norm_num_groups=SCREAMING_SNAKE_CASE__ , norm_type=SCREAMING_SNAKE_CASE__ , )
@apply_forward_hook
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : bool = True ) -> VQEncoderOutput:
lowerCAmelCase__ = self.encoder(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.quant_conv(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=SCREAMING_SNAKE_CASE__ )
@apply_forward_hook
def a ( self : int , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase__ = h
lowerCAmelCase__ = self.post_quant_conv(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.decoder(SCREAMING_SNAKE_CASE__ , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE__ )
def a ( self : str , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase__ = sample
lowerCAmelCase__ = self.encode(SCREAMING_SNAKE_CASE__ ).latents
lowerCAmelCase__ = self.decode(SCREAMING_SNAKE_CASE__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE__ )
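# Hedged usage sketch (commented out; assumes the diffusers VQModel API):
# import torch
# from diffusers import VQModel
# model = VQModel()  # default config
# reconstruction = model(torch.randn(1, 3, 32, 32)).sample  # encode -> quantize -> decode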
| 125
| 1
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
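
# --- Illustrative usage (editor's addition, not part of the original module) ---
# Build a small undirected triangle and print its adjacency list.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int](directed=False)
    graph.add_edge(1, 2).add_edge(2, 3).add_edge(3, 1)
    print(graph)  # {1: [2, 3], 2: [1, 3], 3: [2, 1]}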
| 35
|
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: one pass per call, shrinking the prefix by one."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
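    # --- Illustrative check (editor's addition) ---
    # Each recursive call runs one bubble pass over a one-element-shorter prefix
    # and stops early as soon as a pass makes no swap.
    assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]
    assert bubble_sort([1, 2, 3]) == [1, 2, 3]  # already sorted: single pass, no recursion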
| 698
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = VOCAB_FILES_NAMES
__lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[Any] = ['input_ids', 'attention_mask']
__lowercase : List[Any] = BlenderbotTokenizer
def __init__( self:Union[str, Any] , _a:Optional[int]=None , _a:str=None , _a:Dict=None , _a:Optional[Any]="replace" , _a:Optional[int]="<s>" , _a:str="</s>" , _a:Dict="</s>" , _a:Optional[int]="<s>" , _a:str="<unk>" , _a:int="<pad>" , _a:Dict="<mask>" , _a:int=False , _a:Any=True , **_a:Optional[Any] , ):
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
snake_case__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
snake_case__ = getattr(_a , pre_tok_state.pop('''type''' ) )
snake_case__ = add_prefix_space
snake_case__ = pre_tok_class(**_a )
snake_case__ = add_prefix_space
snake_case__ = '''post_processor'''
snake_case__ = getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
snake_case__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case__ = tuple(state['''sep'''] )
if "cls" in state:
snake_case__ = tuple(state['''cls'''] )
snake_case__ = False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
snake_case__ = add_prefix_space
snake_case__ = True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
snake_case__ = trim_offsets
snake_case__ = True
if changes_to_apply:
snake_case__ = getattr(_a , state.pop('''type''' ) )
snake_case__ = component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def SCREAMING_SNAKE_CASE__ ( self:str ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:Optional[Any] ):
snake_case__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
snake_case__ = value
def SCREAMING_SNAKE_CASE__ ( self:str , *_a:str , **_a:Tuple ):
snake_case__ = kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:str , *_a:Union[str, Any] , **_a:List[str] ):
snake_case__ = kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:str , _a:Optional[str] = None ):
snake_case__ = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = [self.sep_token_id]
snake_case__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:List[int] , _a:Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:"Conversation" ):
snake_case__ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
snake_case__ = ''' '''.join(_a )
snake_case__ = self.encode(_a )
if len(_a ) > self.model_max_length:
snake_case__ = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
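
# --- Illustrative usage (editor's addition; the class above corresponds to
# transformers' BlenderbotTokenizerFast, so this sketch assumes network access
# to the Hub to download the published checkpoint) ---
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer(" Sam is happy.").input_ids   # single sequences are closed with </s>
#   assert ids[-1] == tokenizer.eos_token_id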
| 208
|
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
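    # --- Illustrative check (editor's addition) ---
    # All three variants agree and handle negative inputs via abs():
    assert sum_of_digits(-12345) == sum_of_digits_recursion(-12345) == sum_of_digits_compact(-12345) == 15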
| 208
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Optional[Any] =['''pixel_values''']
def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = True , _lowerCamelCase = 1 / 2_5_5 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
UpperCamelCase_: List[str] = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
UpperCamelCase_: str = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCamelCase_: int = do_resize
UpperCamelCase_: Union[str, Any] = size
UpperCamelCase_: str = resample
UpperCamelCase_: str = do_rescale
UpperCamelCase_: Tuple = rescale_factor
UpperCamelCase_: Any = do_normalize
UpperCamelCase_: Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase_: List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase_: str = do_convert_rgb
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCamelCase_: Union[str, Any] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
UpperCamelCase_: int = (size['height'], size['width'])
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
UpperCamelCase_: List[str] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_: str = resample if resample is not None else self.resample
UpperCamelCase_: List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_: Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase_: Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: List[str] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase_: Any = image_std if image_std is not None else self.image_std
UpperCamelCase_: Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase_: int = size if size is not None else self.size
UpperCamelCase_: str = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase_: str = [convert_to_rgb(_lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase_: int = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCamelCase_: Union[str, Any] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_rescale:
UpperCamelCase_: List[str] = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
if do_normalize:
UpperCamelCase_: Optional[Any] = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
UpperCamelCase_: Dict = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCamelCase_: int = BatchFeature(data={'pixel_values': images} , tensor_type=_lowerCamelCase )
return encoded_outputs
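
# --- Illustrative usage (editor's addition; a sketch assuming the de-obfuscated
# class name BlipImageProcessor and a locally constructed PIL image) ---
#
#   processor = BlipImageProcessor()   # defaults: 384x384, rescale by 1/255, CLIP mean/std
#   batch = processor(images=PIL.Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape        # (1, 3, 384, 384): RGB-converted, resized, rescaled, normalized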
| 57
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCamelCase__ ( a_):
"""simple docstring"""
def __init__( self : Union[str, Any] ):
'''simple docstring'''
__magic_name__ = []
def a__ ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , **UpperCamelCase_ : int ):
'''simple docstring'''
self.events.append('on_init_end' )
def a__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , **UpperCamelCase_ : str ):
'''simple docstring'''
self.events.append('on_train_begin' )
def a__ ( self : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Dict ):
'''simple docstring'''
self.events.append('on_train_end' )
def a__ ( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , **UpperCamelCase_ : Optional[Any] ):
'''simple docstring'''
self.events.append('on_epoch_begin' )
def a__ ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , **UpperCamelCase_ : List[Any] ):
'''simple docstring'''
self.events.append('on_epoch_end' )
def a__ ( self : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[Any] ):
'''simple docstring'''
self.events.append('on_step_begin' )
def a__ ( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any , **UpperCamelCase_ : List[Any] ):
'''simple docstring'''
self.events.append('on_step_end' )
def a__ ( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , **UpperCamelCase_ : Dict ):
'''simple docstring'''
self.events.append('on_evaluate' )
def a__ ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , **UpperCamelCase_ : Any ):
'''simple docstring'''
self.events.append('on_predict' )
def a__ ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , **UpperCamelCase_ : Optional[int] ):
'''simple docstring'''
self.events.append('on_save' )
def a__ ( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[int] ):
'''simple docstring'''
self.events.append('on_log' )
def a__ ( self : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , **UpperCamelCase_ : List[Any] ):
'''simple docstring'''
self.events.append('on_prediction_step' )
@require_torch
class UpperCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self : Optional[Any] ):
'''simple docstring'''
__magic_name__ = tempfile.mkdtemp()
def a__ ( self : List[Any] ):
'''simple docstring'''
shutil.rmtree(self.output_dir )
def a__ ( self : List[str] , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Optional[int]=6_4 , UpperCamelCase_ : Optional[int]=6_4 , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[Any]=False , **UpperCamelCase_ : str ):
'''simple docstring'''
__magic_name__ = RegressionDataset(length=UpperCamelCase_ )
__magic_name__ = RegressionDataset(length=UpperCamelCase_ )
__magic_name__ = RegressionModelConfig(a=UpperCamelCase_ , b=UpperCamelCase_ )
__magic_name__ = RegressionPreTrainedModel(UpperCamelCase_ )
__magic_name__ = TrainingArguments(self.output_dir , disable_tqdm=UpperCamelCase_ , report_to=[] , **UpperCamelCase_ )
return Trainer(
UpperCamelCase_ , UpperCamelCase_ , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , callbacks=UpperCamelCase_ , )
def a__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Dict ):
'''simple docstring'''
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
# Order doesn't matter
__magic_name__ = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : cb.__name__ if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cb.__class__.__name__ )
__magic_name__ = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : cb.__name__ if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cb.__class__.__name__ )
for cba, cba in zip(UpperCamelCase_ , UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(UpperCamelCase_ , cba.__class__ )
elif not isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(cba.__class__ , UpperCamelCase_ )
else:
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def a__ ( self : Union[str, Any] , UpperCamelCase_ : Any ):
'''simple docstring'''
__magic_name__ = ['on_init_end', 'on_train_begin']
__magic_name__ = 0
__magic_name__ = len(trainer.get_eval_dataloader() )
__magic_name__ = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(UpperCamelCase_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def a__ ( self : List[Any] ):
'''simple docstring'''
__magic_name__ = self.get_trainer()
__magic_name__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
# Callbacks passed at init are added to the default callbacks
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__magic_name__ = self.get_trainer(disable_tqdm=UpperCamelCase_ )
__magic_name__ = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
def a__ ( self : List[str] ):
'''simple docstring'''
__magic_name__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__magic_name__ = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(UpperCamelCase_ )
expected_callbacks.remove(UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
__magic_name__ = self.get_trainer()
__magic_name__ = trainer.pop_callback(UpperCamelCase_ )
self.assertEqual(cb.__class__ , UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
trainer.add_callback(UpperCamelCase_ )
expected_callbacks.insert(0 , UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
# We can also add, pop, or remove by instance
__magic_name__ = self.get_trainer()
__magic_name__ = trainer.callback_handler.callbacks[0]
trainer.remove_callback(UpperCamelCase_ )
expected_callbacks.remove(UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
__magic_name__ = self.get_trainer()
__magic_name__ = trainer.callback_handler.callbacks[0]
__magic_name__ = trainer.pop_callback(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
trainer.add_callback(UpperCamelCase_ )
expected_callbacks.insert(0 , UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
def a__ ( self : Any ):
'''simple docstring'''
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=UpperCamelCase_ )
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
# Independent log/save/eval
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
__magic_name__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
# A bit of everything
__magic_name__ = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
__magic_name__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
__magic_name__ = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(UpperCamelCase_ ) in warn_mock.call_args[0][0]
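
# --- Illustrative sketch (editor's addition) ---
# A production callback only needs to override the hooks it cares about; the
# recorder class above implements all of them just to observe call order, e.g.:
#
#   class EvalLoggerCallback(TrainerCallback):
#       def on_evaluate(self, args, state, control, **kwargs):
#           print(f"evaluated at global step {state.global_step}")
#
#   trainer = Trainer(model, args, callbacks=[EvalLoggerCallback])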
| 545
| 0
|
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
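
# --- Illustrative note (editor's addition) ---
# c[] holds one row of Pascal's triangle in O(r) space; updating right-to-left
# ensures c[j - 1] still carries the previous row's value when c[j] is updated.
assert binomial_coefficient(10, 5) == 252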
| 52
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[str] = "roberta"
def __init__( self : List[str] , _snake_case : Union[str, Any]=5_02_65 , _snake_case : List[Any]=7_68 , _snake_case : List[str]=12 , _snake_case : List[str]=12 , _snake_case : Any=30_72 , _snake_case : Union[str, Any]="gelu" , _snake_case : int=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : Any=0.02 , _snake_case : Any=1E-12 , _snake_case : List[Any]=1 , _snake_case : int=0 , _snake_case : Any=2 , _snake_case : Optional[Any]="absolute" , _snake_case : int=True , _snake_case : Any=None , **_snake_case : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def _a ( self : Dict ):
"""simple docstring"""
if self.task == "multiple-choice":
A__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
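
# --- Illustrative usage (editor's addition; a sketch assuming the de-obfuscated
# names RobertaConfig and RobertaOnnxConfig) ---
#
#   onnx_config = RobertaOnnxConfig(RobertaConfig(), task="multiple-choice")
#   onnx_config.inputs["input_ids"]   # {0: 'batch', 1: 'choice', 2: 'sequence'}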
| 52
| 1
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowerCamelCase_ :
def __init__( self : Tuple , __A : str = "cpu" , __A : str = "openai/clip-vit-large-patch14" ):
__A : List[str] = device
__A : List[Any] = CLIPTokenizerFast.from_pretrained(__A )
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
__A : Optional[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__A : Dict = torchvision.transforms.Resize(224 )
__A : Optional[int] = torchvision.transforms.CenterCrop(224 )
def lowerCAmelCase_ ( self : Optional[Any] , __A : str ):
__A : str = self.resize(__A )
__A : Any = self.center_crop(__A )
__A : Any = self.normalize(__A )
return images
def __call__( self : Union[str, Any] , __A : Any=None , __A : Optional[Any]=None , **__A : Tuple ):
__A : int = self.tokenizer(text=__A , **__A )
__A : List[str] = self.preprocess_img(__A )
__A : Any = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowerCamelCase_ ( nn.Module ):
def __init__( self : Any , __A : List[str]=10 , __A : Dict=0.0_1 , __A : List[Any]=None , __A : Dict=None , __A : Any=None , __A : List[str]=None , __A : Optional[Any]=None , __A : Optional[Any]=None , __A : Optional[int]=False , __A : Optional[Any]=True , __A : List[Any]="image" , __A : str=True , __A : Optional[Any]=False , __A : Optional[int]=False , __A : Any=False , ):
super().__init__()
__A : List[str] = None
__A : Tuple = device if device else get_device()
if vqgan:
__A : Optional[Any] = vqgan
else:
__A : Optional[Any] = load_vqgan(self.device , conf_path=__A , ckpt_path=__A )
self.vqgan.eval()
if clip:
__A : List[Any] = clip
else:
__A : List[str] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
__A : Optional[int] = ProcessorGradientFlow(device=self.device )
__A : Any = iterations
__A : str = lr
__A : List[Any] = log
__A : Union[str, Any] = make_grid
__A : Union[str, Any] = return_val
__A : Optional[Any] = quantize
__A : str = self.vqgan.decoder.z_shape
def lowerCAmelCase_ ( self : Any , __A : Union[str, Any]=None , __A : Any=None , __A : Tuple=5 , __A : Optional[Any]=True ):
__A : Tuple = []
if output_path is None:
__A : Any = """./animation.gif"""
if input_path is None:
__A : int = self.save_path
__A : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(__A ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(__A ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
__A : Tuple = total_duration / len(__A )
__A : int = [frame_duration] * len(__A )
if extend_frames:
__A : Union[str, Any] = 1.5
__A : Optional[Any] = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(__A ) )
imageio.mimsave(__A , __A , duration=__A )
print(F"""gif saved to {output_path}""" )
def lowerCAmelCase_ ( self : List[Any] , __A : List[str]=None , __A : Dict=None ):
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
__A : Optional[int] = preprocess(Image.open(__A ) , target_image_size=256 ).to(self.device )
__A : List[str] = preprocess_vqgan(__A )
__A , *__A : Union[str, Any] = self.vqgan.encode(__A )
return z
def lowerCAmelCase_ ( self : Dict , __A : List[Any] ):
__A : Tuple = self.latent.detach().requires_grad_()
__A : Tuple = base_latent + transform_vector
if self.quantize:
__A , *__A : int = self.vqgan.quantize(__A )
else:
__A : List[Any] = trans_latent
return self.vqgan.decode(__A )
def lowerCAmelCase_ ( self : List[str] , __A : Tuple , __A : int , __A : Union[str, Any]=None ):
__A : int = self.clip_preprocessor(text=__A , images=__A , return_tensors="""pt""" , padding=__A )
__A : Optional[Any] = self.clip(**__A )
__A : Optional[int] = clip_outputs.logits_per_image
if weights is not None:
__A : Dict = similarity_logits * weights
return similarity_logits.sum()
def lowerCAmelCase_ ( self : Tuple , __A : int , __A : Optional[Any] , __A : Union[str, Any] ):
__A : int = self._get_clip_similarity(pos_prompts["""prompts"""] , __A , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
__A : str = self._get_clip_similarity(neg_prompts["""prompts"""] , __A , weights=neg_prompts["""weights"""] )
else:
__A : Any = torch.tensor([1] , device=self.device )
__A : List[Any] = -torch.log(__A ) + torch.log(__A )
return loss
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict , __A : Tuple , __A : Dict ):
__A : Union[str, Any] = torch.randn_like(self.latent , requires_grad=__A , device=self.device )
__A : Tuple = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__A : Optional[Any] = self._add_vector(__A )
__A : List[str] = loop_post_process(__A )
__A : str = self._get_CLIP_loss(__A , __A , __A )
print("""CLIP loss""" , __A )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=__A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCAmelCase_ ( self : List[str] , __A : Dict , __A : Any , __A : Tuple ):
wandb.init(reinit=__A , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
__A : str = Image.open(__A )
__A : Dict = image.resize((256, 256) )
            wandb.log({"Original Image": wandb.Image(__A )} )
def lowerCAmelCase_ ( self : Tuple , __A : Optional[Any] ):
if not prompts:
return []
__A : List[str] = []
__A : str = []
if isinstance(__A , __A ):
__A : List[str] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(__A , (tuple, list) ):
__A : Dict = prompt[0]
__A : List[Any] = float(prompt[1] )
elif ":" in prompt:
__A , __A : Union[str, Any] = prompt.split(""":""" )
__A : Union[str, Any] = float(__A )
else:
__A : Dict = prompt
__A : Optional[Any] = 1.0
processed_prompts.append(__A )
weights.append(__A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__A , device=self.device ),
}
def lowerCAmelCase_ ( self : List[Any] , __A : Union[str, Any] , __A : Optional[Any]=None , __A : Tuple=None , __A : Tuple=True , __A : Union[str, Any]=False , __A : Dict=True , __A : List[Any]=True , __A : Any=None , ):
if image_path:
__A : Optional[int] = self._get_latent(__A )
else:
__A : Union[str, Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__A , __A , __A )
assert pos_prompts, "You must provide at least one positive prompt."
__A : str = self.process_prompts(__A )
__A : Any = self.process_prompts(__A )
if save_final and save_path is None:
__A : List[str] = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(__A ):
os.makedirs(__A )
else:
__A : Tuple = save_path + """_""" + get_timestamp()
os.makedirs(__A )
__A : Any = save_path
__A : Dict = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(__A ) )
__A : List[str] = loop_post_process(__A )
for iter, transformed_img in enumerate(self._optimize_CLIP(__A , __A , __A ) ):
if show_intermediate:
show_pil(__A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(__A )} )
if show_final:
show_pil(__A )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}_final.png""" ) )
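
# --- Illustrative sketch (editor's addition) ---
# Stripped to its essentials, the optimization loop above is gradient descent on
# a CLIP similarity loss over an offset vector in VQGAN latent space:
#
#   vector = torch.randn_like(latent, requires_grad=True)
#   optim = torch.optim.Adam([vector], lr=lr)
#   for _ in range(iterations):
#       optim.zero_grad()
#       image = vqgan.decode(latent + vector)              # decode perturbed latent
#       loss = clip_loss(image, pos_prompts, neg_prompts)  # lower = closer to positive prompts
#       loss.backward()
#       optim.step()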
| 17
|
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if `item` occurs in the sorted list `a_list`."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
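
# --- Illustrative alternative (editor's addition) ---
# The slicing recursion above copies a sublist at every level; an index-based
# iterative version performs the same search in O(1) extra space:
def binary_search_iterative(a_list: list[int], item: int) -> bool:
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False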
| 54
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _A ( unittest.TestCase ):
def UpperCAmelCase ( self ):
_UpperCAmelCase = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_UpperCAmelCase = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , x.transpose() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , transpose(_SCREAMING_SNAKE_CASE ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , transpose(_SCREAMING_SNAKE_CASE ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , np.asarray(transpose(_SCREAMING_SNAKE_CASE ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , np.asarray(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) ) ) )
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , np.reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) , np.reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) ) )
@require_torch
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(_SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) , reshape(_SCREAMING_SNAKE_CASE , (12, 5) ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(_SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) , reshape(_SCREAMING_SNAKE_CASE , (12, 5) ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , np.asarray(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) , np.asarray(reshape(_SCREAMING_SNAKE_CASE , (12, 5) ) ) ) )
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , np.squeeze(_SCREAMING_SNAKE_CASE ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , np.squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) ) )
@require_torch
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , squeeze(_SCREAMING_SNAKE_CASE ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(_SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , squeeze(_SCREAMING_SNAKE_CASE ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(_SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , np.asarray(squeeze(_SCREAMING_SNAKE_CASE ) ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , np.asarray(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) ) ) )
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , np.expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) ) )
@require_torch
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(_SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , np.asarray(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) ) ) )
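
# --- Illustrative sketch (editor's addition) ---
# The helpers under test dispatch on the tensor's framework; conceptually:
#
#   def transpose(array, axes=None):
#       if isinstance(array, np.ndarray):
#           return np.transpose(array, axes=axes)
#       if is_torch_tensor(array):
#           return array.T if axes is None else array.permute(*axes)
#       ...  # analogous branches for tf.Tensor and jnp.ndarray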
| 175
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of both arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
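
# --- Illustrative check (editor's addition) ---
#   median_of_two_arrays([1, 3], [2])     == 2     # odd total: middle element
#   median_of_two_arrays([1, 2], [3, 4])  == 2.5   # even total: mean of middle two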
| 175
| 1
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, "wb") as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 85
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Sum of the per-class covariance matrices, normalized by the sample count."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Covariance of the class means around the general mean, weighted by class size."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its first `dimensions` principal components."""
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto the `dimensions` most class-discriminative directions."""
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError when `dimensions` is not
    # strictly smaller than `classes`
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
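
# --- Illustrative usage (editor's addition) ---
# principal_component_analysis expects features shaped (n_features, n_samples):
#
#   features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])  # 2 features, 3 samples
#   projected = principal_component_analysis(features, dimensions=1)
#   projected.shape   # (1, 3)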
| 85
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : Optional[int] = VOCAB_FILES_NAMES
_A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Tuple = RemBertTokenizer
def __init__(self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[MASK]" , **lowerCAmelCase_ , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
A_ : List[str] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
A_ : List[Any] = do_lower_case
A_ : Tuple = remove_space
A_ : Optional[Any] = keep_accents
A_ : List[str] = vocab_file
A_ : Union[str, Any] = False if not self.vocab_file else True
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
A_ : Any = [self.sep_token_id]
A_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
A_ : Any = [self.sep_token_id]
A_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
if not os.path.isdir(lowerCAmelCase_ ):
logger.error("""Vocabulary path ({}) should be a directory""".format(lowerCAmelCase_ ) )
return
A_ : str = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
return (out_vocab_file,)
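
# --- Illustrative check (editor's addition) ---
# The token-type method above (transformers' create_token_type_ids_from_sequences)
# marks segment A (plus its [CLS]/[SEP]) with 0 and segment B (plus its [SEP])
# with 1; e.g. for A = [5, 6] and B = [7]:
#   [CLS] 5 6 [SEP] 7 [SEP]  ->  [0, 0, 0, 0, 1, 1]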
| 480
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : Optional[int] = """facebook/bart-large-mnli"""
_A : str = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
_A : List[str] = """text_classifier"""
_A : Optional[int] = AutoTokenizer
_A : Optional[Any] = AutoModelForSequenceClassification
_A : List[str] = ["""text""", ["""text"""]]
_A : Dict = ["""text"""]
def lowerCamelCase(self ):
super().setup()
A_ : int = self.model.config
A_ : List[str] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
A_ : List[Any] = int(lowerCAmelCase_ )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ ):
A_ : List[Any] = labels
return self.pre_processor(
[text] * len(lowerCAmelCase_ ) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def lowerCamelCase(self , lowerCAmelCase_ ):
A_ : str = outputs.logits
A_ : Optional[int] = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 480
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class FlaxDiffusionPipelineFastTests(unittest.TestCase):
    """simple docstring"""

    def test_downloaded_weights_are_flax_only(self):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname)
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_stable_diffusion_flax_tiny(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None)
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4_flax(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None)
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None)
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16)
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        '''simple docstring'''
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None)
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        '''simple docstring'''
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None)
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True)
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
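
# --- Hedged note (added for illustration): the replicate/shard pattern above ---
# `replicate` copies the params pytree onto every local device and `shard`
# reshapes a batch to (num_devices, batch_per_device, ...), so the jit=True
# (pmap-compiled) pipeline call runs data-parallel across all devices:
#
#   rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   params_per_device = replicate(params)   # same weights on each device
#   sharded_ids = shard(prompt_ids)         # leading axis split across devices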
| 577
|
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    """simple docstring"""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
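
# --- Hedged usage example (added for illustration) ---
# longest_common_substring("abcdef", "xabded") -> "ab"
# dp[i][j] stores the length of the common suffix of text1[:i] and text2[:j];
# ans_index marks where the best match ends in text1.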
| 577
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ):
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
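
# --- Hedged usage sketch (added for illustration; the checkpoint name is an assumption) ---
#   from transformers import BridgeTowerProcessor
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   batch = processor(images=image, text="a photo of a cat", return_tensors="pt")
#   # input_ids / attention_mask come from the tokenizer; pixel_values (+ pixel_mask)
#   # come from the image processor and are merged via encoding.update(...) above.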
| 132
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
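
# --- Hedged usage sketch (added for illustration) ---
#   config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#   config.hidden_size   # 64 * 2 ** 3 == 512, the channel dim after the last stage
#   config.stage_names   # ["stem", "stage1", "stage2", "stage3", "stage4"]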
| 132
| 1
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    '''simple docstring'''

    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    '''simple docstring'''
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler", )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    '''simple docstring'''

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
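
# --- Hedged usage sketch (added for illustration) ---
#   optimizer = AdamW(model.parameters(), lr=10.0)
#   scheduler = get_linear_schedule_with_warmup(
#       optimizer, num_warmup_steps=2, num_training_steps=10)
#   # LR ramps 0 -> 10.0 over the first 2 steps, then decays linearly to 0 at
#   # step 10, exactly the expected_learning_rates row in the table above.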
| 33
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    '''simple docstring'''
    @register_to_config
    def __init__(self, *,
        clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 768, time_embed_dim: int, cross_attention_dim, ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
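
# --- Hedged shape-check sketch (added for illustration; the dims are assumptions) ---
#   proj = UnCLIPTextProjModel(clip_extra_context_tokens=4, clip_embeddings_dim=768,
#                              time_embed_dim=1536, cross_attention_dim=2048)
#   img = torch.randn(2, 768); txt = torch.randn(2, 77, 768)
#   hidden, time_emb = proj(image_embeddings=img, prompt_embeds=img,
#                           text_encoder_hidden_states=txt, do_classifier_free_guidance=False)
#   # hidden.shape == (2, 4 + 77, 2048) and time_emb.shape == (2, 1536)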
| 113
| 0
|
class lowercase_ :
    def __init__(self, set_counts) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst) -> bool:
        """Merge two sets using the union-by-rank heuristic; returns True if merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set) -> int:
        """Find the set's representative, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
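
# --- Hedged usage example (added for illustration) ---
#   ds = lowercase_([1, 1, 1])              # three singleton sets
#   ds.merge(0, 1)                          # union by rank -> returns True
#   ds.get_parent(0) == ds.get_parent(1)    # True (path compression applied)
#   ds.max_set                              # 2, the largest set size so far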
| 706
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.')
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
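
# --- Hedged usage sketch (added for illustration) ---
#   features = datasets.Features({"text": datasets.Value("string"),
#                                 "label": datasets.Value("int32")})
#   ds = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)
#   timed_iter = get_duration(lambda: list(ds))
#   seconds = timed_iter()   # the decorator returns elapsed time, not the result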
| 431
| 0
|
'''simple docstring'''
def binary_insertion_sort(collection) -> list:
    """simple docstring"""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 26
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
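
# --- Hedged usage sketch (added for illustration) ---
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc
# The dataset name ("aeslc") is inferred from the checkpoint's parent directory and
# selects the matching task_specific_params entry for the config and tokenizer length.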
| 583
| 0
|
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 331
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
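
# --- Worked example (added for illustration) ---
# For n = 5 there are 16 products a**b with 2 <= a, b <= 5, but 2**4 == 4**2 == 16,
# so the set keeps only 15 distinct terms: solution(5) == 15.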
| 331
| 1
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        """simple docstring"""
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        """simple docstring"""
        return f'Node({self.key}: {self.value})'

    @property
    def level(self) -> int:
        """simple docstring"""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        """simple docstring"""
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        """simple docstring"""
        items = list(self)
        if len(items) == 0:
            return f'SkipList(level={self.level})'
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f'[{node.key}]'.ljust(label_size, '-') + '* ' * len(forwards))
        lines.append(' ' * label_size + '| ' * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f'[{node.key}]'.ljust(label_size, '-')
                + ' '.join(str(n.key) if n.key == node.key else '|' for n in forwards))
            lines.append(' ' * label_size + '| ' * len(forwards))
            forwards = node.forward
        lines.append('None'.ljust(label_size) + '* ' * len(forwards))
        return f'SkipList(level={self.level})\n' + "\n".join(lines)
    def __iter__(self):
        """simple docstring"""
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """simple docstring"""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """simple docstring"""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        """simple docstring"""
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        """simple docstring"""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key) -> VT | None:
        """simple docstring"""
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert('Key1', 3)
    skip_list.insert('Key2', 12)
    skip_list.insert('Key3', 41)
    skip_list.insert('Key4', -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert('Key1', 10)
    skip_list.insert('Key1', 12)
    skip_list.insert('Key5', 7)
    skip_list.insert('Key7', 10)
    skip_list.insert('Key10', 5)
    skip_list.insert('Key7', 7)
    skip_list.insert('Key5', 5)
    skip_list.insert('Key10', 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    '''simple docstring'''
    skip_list = SkipList()
    assert skip_list.find('Some key') is None


def test_search():
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert('Key2', 20)
    assert skip_list.find('Key2') == 20
    skip_list.insert('Some Key', 10)
    skip_list.insert('Key2', 8)
    skip_list.insert('V', 13)
    assert skip_list.find('Y') is None
    assert skip_list.find('Key2') == 8
    assert skip_list.find('Some Key') == 10
    assert skip_list.find('V') == 13


def test_deleting_item_from_empty_list_do_nothing():
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.delete('Some key')
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)
    skip_list.delete('V')
    skip_list.delete('Key2')
    assert skip_list.find('V') is None
    assert skip_list.find('Key2') is None


def test_delete_removes_only_given_key():
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)
    skip_list.delete('V')
    assert skip_list.find('V') is None
    assert skip_list.find('X') == 14
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15
    skip_list.delete('X')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15
    skip_list.delete('Key1')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') == 15
    skip_list.delete('Key2')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') is None


def test_delete_doesnt_leave_dead_nodes():
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 142)
    skip_list.insert('Key2', 15)
    skip_list.delete('X')

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    '''simple docstring'''
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    '''simple docstring'''
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert(2, '2')
    skip_list.insert(4, '4')
    skip_list.insert(6, '4')
    skip_list.insert(4, '5')
    skip_list.insert(8, '4')
    skip_list.insert(9, '4')
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
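
# --- Hedged note (added for illustration) ---
# Each node receives a geometric random height (p = 0.5 by default), so level i
# holds roughly n / 2**i keys and _locate_node skips about half of the remaining
# keys per level, giving expected O(log n) find/insert/delete. This probabilistic
# behaviour is why the tests above are repeated 100 times.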
| 85
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    '''simple docstring'''
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 90
| 0
|
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr, low, high):
    """simple docstring"""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr, low, mid, high):
    """simple docstring"""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size):
    """simple docstring"""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_graph():
    """simple docstring"""
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
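
# --- Hedged usage example (added for illustration, CLRS-style input) ---
#   arr = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
#   max_subarray(arr, 0, len(arr) - 1)   # -> (7, 10, 43): indices 7..10 sum to 43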
| 706
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
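
# --- Hedged usage example (added for illustration) ---
#   evaluate_postfix(["2", "1", "+", "3", "*"])   # -> 9, i.e. (2 + 1) * 3
#   evaluate_postfix(["4", "13", "5", "/", "+"])  # -> 6, division truncates toward zero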
| 127
| 0
|
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
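# A minimal, commented usage sketch of the helpers re-exported above (not part
# of the original module; the two names used are taken from the lists above):
#
#     from accelerate.utils import set_seed, send_to_device
#     import torch
#
#     set_seed(42)  # seeds python, numpy and torch RNGs in one call
#     batch = send_to_device({"x": torch.ones(2, 3)}, torch.device("cpu"))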
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)
        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")
        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        # Use CLIPSeg to produce an inpainting mask from the `text` prompt
        inputs = self.segmentation_processor(text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor)
        return inpainting_pipeline(prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random PIL images to feed the processor with."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
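# A hedged usage sketch of the processor under test (not part of the original
# test file; the checkpoint names are illustrative assumptions):
#
#     processor = VisionTextDualEncoderProcessor(
#         tokenizer=BertTokenizerFast.from_pretrained("bert-base-uncased"),
#         image_processor=ViTImageProcessor.from_pretrained("google/vit-base-patch16-224"),
#     )
#     batch = processor(text=["a photo of a cat"], images=images, return_tensors="pt")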
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices)
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format"):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_bytes)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_bytes in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)]), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format"):
                    written += file_obj.write(json_bytes)
        return written
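# A hedged usage sketch (not part of the original module; it assumes the
# public `Dataset.to_json` entry point delegates to JsonDatasetWriter, which
# is how these io classes are normally reached):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"a": [1, 2, 3]})
#     ds.to_json("out.jsonl")  # JSON Lines, one record per row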
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass  # no fast tokenizer is exposed for RoCBert

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass  # no fast tokenizer is exposed for RoCBert

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
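# How the lazy pattern above is typically exercised (illustrative, hedged):
# with _LazyModule installed in sys.modules, the heavy submodules are only
# imported on first attribute access, e.g.
#
#     from transformers import RoCBertModel  # triggers the lazy import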
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
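# A hedged usage sketch (not part of the original file; `waveform` stands in
# for any 1-D float array of 16 kHz audio samples):
#
#     tool = SpeechToTextTool()
#     transcription = tool(waveform)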
def _get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the undirected bridges of `graph` using DFS low-link values."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
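if __name__ == "__main__":
    # Illustrative usage (not part of the original module): the first demo
    # graph has exactly the three bridges {(2, 3), (3, 4), (2, 5)}.
    print(compute_bridges(_get_demo_graph(0)))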
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs)
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir)
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir)
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length)))
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models")
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name")
        parser.add_argument(
            "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name")
        parser.add_argument(
            "--cache_dir", default=str(Path(root_dir).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co")
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config")
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler")
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir", default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ))
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")
def generic_train(model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1)
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params)

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
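# A hedged usage sketch (not part of the original file): a task module
# subclasses BaseTransformer and implements get_dataloader, then hands the
# parsed args to generic_train. `MyTask` is a hypothetical name.
#
#     class MyTask(BaseTransformer):
#         def get_dataloader(self, type_path, batch_size, shuffle=False):
#             ...  # build a DataLoader for "train" / "dev" / "test"
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     MyTask.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     trainer = generic_train(MyTask(args), args)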
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """
    Find the area of the rectangular grid whose number of contained rectangles,
    triangle(a) * triangle(b), is closest to `target`.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f'{solution() = }')
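# Worked check (hedged, illustrative): a 2 x 3 grid contains
# triangle(2) * triangle(3) = 3 * 6 = 18 rectangles, so solution(18) reports
# area 2 * 3 = 6. The default target of 2,000,000 corresponds to Project Euler
# problem 85 (published answer: 2772).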
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__SCREAMING_SNAKE_CASE = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" FNet tokenizer backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
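# A hedged usage sketch (not part of the original file):
#
#     tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
#     enc = tok("lower newer")
#     # input_ids are wrapped as [CLS] ... [SEP] by build_inputs_with_special_tokens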
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
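# A hedged usage sketch (not part of the original file):
#
#     config = GPTJConfig()
#     onnx_config = GPTJOnnxConfig(config, task="default", use_past=False)
#     list(onnx_config.inputs)  # ["input_ids", "attention_mask"]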
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        """Build a graph from a set of vertices and a mapping (u, v) -> weight."""
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int):
        """Add a new (undirected) edge with the given weight."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        """Run Prim's algorithm and return the minimum spanning subgraph."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Maximum saving from replacing the network with its minimum spanning tree."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count composites below max_number with exactly two prime factors (semiprimes)."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
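if __name__ == "__main__":
    # Illustrative usage (not part of the original module):
    assert camelcase_to_snakecase("SquadV2") == "squad_v2"
    assert filename_prefix_for_split("squad", "train") == "squad-train"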
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return number + 2 if (number, number + 2) is a twin-prime pair, else -1.
    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir: str) -> dict:
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 161
| 0
|
import os
import string
import sys
lowercase : Optional[Any] = 1 << 8
lowercase : int = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 2_7,
"""up""": 6_5 + ARROW_KEY_FLAG,
"""down""": 6_6 + ARROW_KEY_FLAG,
"""right""": 6_7 + ARROW_KEY_FLAG,
"""left""": 6_8 + ARROW_KEY_FLAG,
"""mod_int""": 9_1,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 5_0,
"""delete""": 5_1,
"""pg_up""": 5_3,
"""pg_down""": 5_4,
}
lowercase : Optional[Any] = KEYMAP["""up"""]
lowercase : Optional[int] = KEYMAP["""left"""]
if sys.platform == "win32":
lowercase : Dict = []
lowercase : int = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(1_0):
lowercase : List[str] = ord(str(i))
def UpperCAmelCase_ ( ):
if os.name == "nt":
import msvcrt
lowerCamelCase_: Tuple = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_UpperCAmelCase ) == 0:
# Read the keystroke
lowerCamelCase_: str = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCamelCase_: Any = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCamelCase_: Union[str, Any] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(_UpperCAmelCase )
if ord(_UpperCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_2_6 ) )
lowerCamelCase_: Optional[Any] = chr(KEYMAP["""esc"""] )
except KeyError:
lowerCamelCase_: Dict = cha[1]
else:
lowerCamelCase_: Union[str, Any] = ch.decode(_UpperCAmelCase )
else:
lowerCamelCase_: List[Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCamelCase_: int = sys.stdin.fileno()
lowerCamelCase_: Tuple = termios.tcgetattr(_UpperCAmelCase )
try:
tty.setraw(_UpperCAmelCase )
lowerCamelCase_: Optional[Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(_UpperCAmelCase , termios.TCSADRAIN , _UpperCAmelCase )
return ch
def UpperCAmelCase_ ( ):
lowerCamelCase_: Union[str, Any] = get_raw_chars()
if ord(_UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_UpperCAmelCase ) == KEYMAP["esc"]:
lowerCamelCase_: Dict = get_raw_chars()
if ord(_UpperCAmelCase ) == KEYMAP["mod_int"]:
lowerCamelCase_: Optional[Any] = get_raw_chars()
if ord(_UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_UpperCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 584
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 584
| 1
|
"""simple docstring"""
import heapq
def snake_case_ ( A_ : dict ):
'''simple docstring'''
_lowerCamelCase : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(A_, [-1 * len(A_ ), (key, value)] )
# chosen_vertices = set of chosen vertices
_lowerCamelCase : str = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_lowerCamelCase : Dict = heapq.heappop(A_ )[1][0]
chosen_vertices.add(A_ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
_lowerCamelCase : List[str] = elem[1][1].index(A_ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(A_ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 83
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def snake_case_ ( A_ : Tuple, A_ : List[str], A_ : Optional[Any], A_ : Dict, A_ : Dict=True, A_ : int="pt" ):
'''simple docstring'''
_lowerCamelCase : str = {'''add_prefix_space''': True} if isinstance(A_, A_ ) and not line.startswith(''' ''' ) else {}
_lowerCamelCase : Union[str, Any] = padding_side
return tokenizer(
[line], max_length=A_, padding='''max_length''' if pad_to_max_length else None, truncation=A_, return_tensors=A_, add_special_tokens=A_, **A_, )
def snake_case_ ( A_ : Any, A_ : Optional[int], A_ : List[Any]=None, ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = input_ids.ne(A_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case ( _lowercase):
def __init__( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple="train" , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Union[str, Any]="" , ):
"""simple docstring"""
super().__init__()
_lowerCamelCase : Optional[int] = Path(__lowerCAmelCase ).joinpath(type_path + '''.source''' )
_lowerCamelCase : List[str] = Path(__lowerCAmelCase ).joinpath(type_path + '''.target''' )
_lowerCamelCase : List[Any] = self.get_char_lens(self.src_file )
_lowerCamelCase : Optional[int] = max_source_length
_lowerCamelCase : Optional[Any] = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
_lowerCamelCase : List[Any] = tokenizer
_lowerCamelCase : List[Any] = prefix
if n_obs is not None:
_lowerCamelCase : List[str] = self.src_lens[:n_obs]
_lowerCamelCase : int = src_lang
_lowerCamelCase : Union[str, Any] = tgt_lang
def __len__( self : int ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : Dict , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = index + 1 # linecache starts at 1
_lowerCamelCase : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) , __lowerCAmelCase ).rstrip('''\n''' )
_lowerCamelCase : Optional[Any] = linecache.getline(str(self.tgt_file ) , __lowerCAmelCase ).rstrip('''\n''' )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , __lowerCAmelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_lowerCamelCase : Optional[int] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
)
_lowerCamelCase : Union[str, Any] = self.tokenizer.generator if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
_lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_source_length , '''right''' )
_lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_target_length , '''right''' )
_lowerCamelCase : Optional[Any] = source_inputs['''input_ids'''].squeeze()
_lowerCamelCase : Union[str, Any] = target_inputs['''input_ids'''].squeeze()
_lowerCamelCase : Any = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : str ):
"""simple docstring"""
return [len(__lowerCAmelCase ) for x in Path(__lowerCAmelCase ).open().readlines()]
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = torch.stack([x['''input_ids'''] for x in batch] )
_lowerCamelCase : Tuple = torch.stack([x['''attention_mask'''] for x in batch] )
_lowerCamelCase : Union[str, Any] = torch.stack([x['''decoder_input_ids'''] for x in batch] )
_lowerCamelCase : Tuple = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , __lowerCAmelCase )
else self.tokenizer.pad_token_id
)
_lowerCamelCase : Tuple = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , __lowerCAmelCase )
else self.tokenizer.pad_token_id
)
_lowerCamelCase : Union[str, Any] = trim_batch(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : List[str] = trim_batch(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
lowerCAmelCase__ = getLogger(__name__)
def snake_case_ ( A_ : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(A_ ) )
def snake_case_ ( A_ : str ):
'''simple docstring'''
_lowerCamelCase : Dict = get_git_info()
save_json(A_, os.path.join(A_, '''git_log.json''' ) )
def snake_case_ ( A_ : str, A_ : Union[str, Any], A_ : int=4, **A_ : Optional[int] ):
'''simple docstring'''
with open(A_, '''w''' ) as f:
json.dump(A_, A_, indent=A_, **A_ )
def snake_case_ ( A_ : Any ):
'''simple docstring'''
with open(A_ ) as f:
return json.load(A_ )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : List[str] = git.Repo(search_parent_directories=A_ )
_lowerCamelCase : str = {
'''repo_id''': str(A_ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def snake_case_ ( A_ : Callable, A_ : Iterable ):
'''simple docstring'''
return list(map(A_, A_ ) )
def snake_case_ ( A_ : str, A_ : Tuple ):
'''simple docstring'''
with open(A_, '''wb''' ) as f:
return pickle.dump(A_, A_ )
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
def remove_articles(A_ : str ):
return re.sub(R'''\b(a|an|the)\b''', ''' ''', A_ )
def white_space_fix(A_ : Any ):
return " ".join(text.split() )
def remove_punc(A_ : List[Any] ):
_lowerCamelCase : Any = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(A_ : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(A_ ) ) ) )
def snake_case_ ( A_ : int, A_ : List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = normalize_answer(A_ ).split()
_lowerCamelCase : int = normalize_answer(A_ ).split()
_lowerCamelCase : str = Counter(A_ ) & Counter(A_ )
_lowerCamelCase : Any = sum(common.values() )
if num_same == 0:
return 0
_lowerCamelCase : int = 1.0 * num_same / len(A_ )
_lowerCamelCase : str = 1.0 * num_same / len(A_ )
_lowerCamelCase : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
def snake_case_ ( A_ : Dict, A_ : str ):
'''simple docstring'''
return normalize_answer(A_ ) == normalize_answer(A_ )
def snake_case_ ( A_ : List[str], A_ : List[str] ):
'''simple docstring'''
assert len(A_ ) == len(A_ )
_lowerCamelCase : Optional[Any] = 0
for hypo, pred in zip(A_, A_ ):
em += exact_match_score(A_, A_ )
if len(A_ ) > 0:
em /= len(A_ )
return {"em": em}
def snake_case_ ( A_ : Optional[int] ):
'''simple docstring'''
return model_prefix.startswith('''rag''' )
def snake_case_ ( A_ : Dict, A_ : int, A_ : List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_lowerCamelCase : Tuple = '''dropout_rate'''
for p in extra_params:
if getattr(A_, A_, A_ ):
if not hasattr(A_, A_ ) and not hasattr(A_, equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(A_ ) )
delattr(A_, A_ )
continue
_lowerCamelCase : Union[str, Any] = p if hasattr(A_, A_ ) else equivalent_param[p]
setattr(A_, A_, getattr(A_, A_ ) )
delattr(A_, A_ )
return hparams, config
| 83
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : Any = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """longformer"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Union[List[int], int] = 5_1_2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : int = 7_6_8 , SCREAMING_SNAKE_CASE_ : int = 1_2 , SCREAMING_SNAKE_CASE_ : int = 1_2 , SCREAMING_SNAKE_CASE_ : int = 3_0_7_2 , SCREAMING_SNAKE_CASE_ : str = "gelu" , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : int = 5_1_2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1E-12 , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : Tuple , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = attention_window
lowerCAmelCase_ : Union[str, Any] = sep_token_id
lowerCAmelCase_ : Union[str, Any] = bos_token_id
lowerCAmelCase_ : List[str] = eos_token_id
lowerCAmelCase_ : int = vocab_size
lowerCAmelCase_ : Dict = hidden_size
lowerCAmelCase_ : int = num_hidden_layers
lowerCAmelCase_ : Tuple = num_attention_heads
lowerCAmelCase_ : Any = hidden_act
lowerCAmelCase_ : Optional[int] = intermediate_size
lowerCAmelCase_ : Any = hidden_dropout_prob
lowerCAmelCase_ : List[str] = attention_probs_dropout_prob
lowerCAmelCase_ : Union[str, Any] = max_position_embeddings
lowerCAmelCase_ : Optional[int] = type_vocab_size
lowerCAmelCase_ : Dict = initializer_range
lowerCAmelCase_ : Any = layer_norm_eps
lowerCAmelCase_ : Dict = onnx_export
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : "PretrainedConfig" , SCREAMING_SNAKE_CASE_ : str = "default" , SCREAMING_SNAKE_CASE_ : "List[PatchingSpec]" = None ):
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = True
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
if self.task == "multiple-choice":
lowerCAmelCase_ : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase_ : Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : Optional[Any] = super().outputs
if self.task == "default":
lowerCAmelCase_ : List[str] = {0: 'batch'}
return outputs
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 1_4 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : "PreTrainedTokenizerBase" , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[TensorType] = None , ):
lowerCAmelCase_ : Any = super().generate_dummy_inputs(
preprocessor=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCAmelCase_ : str = torch.zeros_like(inputs['input_ids'] )
# make every second token global
lowerCAmelCase_ : Tuple = 1
return inputs
| 707
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowercase__ : Optional[Any] = get_logger(__name__)
lowercase__ : Tuple = Path(__file__).parent / """model_card_template.md"""
lowercase__ : Optional[Any] = uuida().hex
lowercase__ : Any = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
lowercase__ : Tuple = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
lowercase__ : Dict = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def UpperCamelCase_ ( lowerCAmelCase__ : Union[Dict, str, None] = None ) -> str:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_flax_available():
ua += f"; jax/{_jax_version}"
ua += f"; flax/{_flax_version}"
if is_onnx_available():
ua += f"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
ua += "; " + user_agent
return ua
def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[str] = None ) -> Union[str, Any]:
"""simple docstring"""
if token is None:
lowerCAmelCase_ : Any = HfFolder.get_token()
if organization is None:
lowerCAmelCase_ : Union[str, Any] = whoami(lowerCAmelCase__ )['name']
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(lowerCAmelCase__ , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
lowerCAmelCase_ : List[str] = args.hub_token if hasattr(lowerCAmelCase__ , 'hub_token' ) else None
lowerCAmelCase_ : List[Any] = get_full_repo_name(lowerCAmelCase__ , token=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=lowerCAmelCase__ , model_name=lowerCAmelCase__ , repo_name=lowerCAmelCase__ , dataset_name=args.dataset_name if hasattr(lowerCAmelCase__ , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(lowerCAmelCase__ , 'gradient_accumulation_steps' ) else None
) , adam_betaa=args.adam_betaa if hasattr(lowerCAmelCase__ , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(lowerCAmelCase__ , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(lowerCAmelCase__ , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(lowerCAmelCase__ , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(lowerCAmelCase__ , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(lowerCAmelCase__ , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(lowerCAmelCase__ , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(lowerCAmelCase__ , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(lowerCAmelCase__ , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
lowerCAmelCase_ : Tuple = os.path.join(args.output_dir , 'README.md' )
model_card.save(lowerCAmelCase__ )
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[str] , lowerCAmelCase__ : Optional[str] = None ) -> Tuple:
"""simple docstring"""
if resolved_file is None or commit_hash is not None:
return commit_hash
lowerCAmelCase_ : Tuple = str(Path(lowerCAmelCase__ ).as_posix() )
lowerCAmelCase_ : Any = re.search(R'snapshots/([^/]+)/' , lowerCAmelCase__ )
if search is None:
return None
lowerCAmelCase_ : Tuple = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(lowerCAmelCase__ ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowercase__ : int = os.path.expanduser(
os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
lowercase__ : Optional[int] = os.path.join(hf_cache_home, """diffusers""")
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[str] = None ) -> None:
"""simple docstring"""
if new_cache_dir is None:
lowerCAmelCase_ : Any = DIFFUSERS_CACHE
if old_cache_dir is None:
lowerCAmelCase_ : Optional[int] = old_diffusers_cache
lowerCAmelCase_ : Optional[int] = Path(lowerCAmelCase__ ).expanduser()
lowerCAmelCase_ : Dict = Path(lowerCAmelCase__ ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
lowerCAmelCase_ : List[str] = new_cache_dir / old_blob_path.relative_to(lowerCAmelCase__ )
new_blob_path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
os.replace(lowerCAmelCase__ , lowerCAmelCase__ )
try:
os.symlink(lowerCAmelCase__ , lowerCAmelCase__ )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowercase__ : Any = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
lowercase__ : int = 0
else:
with open(cache_version_file) as f:
try:
lowercase__ : int = int(f.read())
except ValueError:
lowercase__ : Any = 0
if cache_version < 1:
lowercase__ : int = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
lowercase__ : int = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
"""the directory exists and can be written to."""
)
def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> str:
"""simple docstring"""
if variant is not None:
lowerCAmelCase_ : Any = weights_name.split('.' )
lowerCAmelCase_ : List[Any] = splits[:-1] + [variant] + splits[-1:]
lowerCAmelCase_ : Any = '.'.join(lowerCAmelCase__ )
return weights_name
def UpperCamelCase_ ( lowerCAmelCase__ : List[str] , *,
lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int]=None , ) -> str:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = str(lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
return pretrained_model_name_or_path
elif os.path.isdir(lowerCAmelCase__ ):
if os.path.isfile(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ):
# Load from a PyTorch checkpoint
lowerCAmelCase_ : Dict = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ):
lowerCAmelCase_ : List[str] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return model_file
else:
raise EnvironmentError(
f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(lowerCAmelCase__ ).base_version ) >= version.parse('0.20.0' )
):
try:
lowerCAmelCase_ : Dict = hf_hub_download(
lowerCAmelCase__ , filename=_add_variant(lowerCAmelCase__ , lowerCAmelCase__ ) , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , proxies=lowerCAmelCase__ , resume_download=lowerCAmelCase__ , local_files_only=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , user_agent=lowerCAmelCase__ , subfolder=lowerCAmelCase__ , revision=revision or commit_hash , )
warnings.warn(
f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , lowerCAmelCase__ , )
return model_file
except: # noqa: E722
warnings.warn(
f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(lowerCAmelCase__ , lowerCAmelCase__ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(lowerCAmelCase__ , lowerCAmelCase__ )}' so that the correct variant file can be added." , lowerCAmelCase__ , )
try:
# 2. Load model file as usual
lowerCAmelCase_ : Optional[Any] = hf_hub_download(
lowerCAmelCase__ , filename=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , proxies=lowerCAmelCase__ , resume_download=lowerCAmelCase__ , local_files_only=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , user_agent=lowerCAmelCase__ , subfolder=lowerCAmelCase__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'this model name. Check the model page at '
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 317
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[torch.FloatTensor] = None
UpperCamelCase_ : torch.FloatTensor = None
UpperCamelCase_ : Optional[Tuple[torch.FloatTensor]] = None
UpperCamelCase_ : Optional[Tuple[torch.FloatTensor]] = None
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : str="cls" , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Union[str, Any]=True , **UpperCAmelCase_ : List[Any] , ):
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = project_dim
SCREAMING_SNAKE_CASE : Optional[Any] = pooler_fn
SCREAMING_SNAKE_CASE : Optional[Any] = learn_encoder
SCREAMING_SNAKE_CASE : Optional[int] = use_attention_mask
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = [r'''pooler''', r'''logit_scale''']
UpperCamelCase_ : Tuple = [r'''position_ids''', r'''predictions.decoder.bias''']
UpperCamelCase_ : Tuple = '''roberta'''
UpperCamelCase_ : Optional[Any] = RobertaSeriesConfig
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ):
super().__init__(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = XLMRobertaModel(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE : List[Any] = getattr(UpperCAmelCase_ , "has_pre_transformation" , UpperCAmelCase_ )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE : List[str] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _A ( self : Optional[Any] , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : Optional[Any] = self.base_model(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , output_attentions=UpperCAmelCase_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=UpperCAmelCase_ , )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE : str = outputs["hidden_states"][-2]
SCREAMING_SNAKE_CASE : str = self.pre_LN(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = self.transformation_pre(UpperCAmelCase_ )
return TransformationModelOutput(
projection_state=UpperCAmelCase_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
SCREAMING_SNAKE_CASE : str = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=UpperCAmelCase_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 62
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {'vocab_file': 'vocab.txt'}
__SCREAMING_SNAKE_CASE : Tuple = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
__SCREAMING_SNAKE_CASE : Any = {
'openbmb/cpm-ant-10b': 10_24,
}
def UpperCAmelCase__ ( __magic_name__ : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : int = collections.OrderedDict()
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
lowerCAmelCase : int = reader.readlines()
for index, token in enumerate(__magic_name__ ):
lowerCAmelCase : str = token.rstrip('''\n''' )
lowerCAmelCase : Tuple = index
return vocab
class __magic_name__ ( snake_case ):
def __init__( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[str]="<unk>" , lowerCamelCase__ : Union[str, Any]=2_0_0 ):
lowerCAmelCase : Union[str, Any] = vocab
lowerCAmelCase : int = unk_token
lowerCAmelCase : List[str] = max_input_chars_per_word
def _A ( self : List[Any] , lowerCamelCase__ : List[str] ):
lowerCAmelCase : str = list(lowerCamelCase__ )
if len(lowerCamelCase__ ) > self.max_input_chars_per_word:
return [self.unk_token]
lowerCAmelCase : str = 0
lowerCAmelCase : Dict = []
while start < len(lowerCamelCase__ ):
lowerCAmelCase : Dict = len(lowerCamelCase__ )
lowerCAmelCase : Optional[Any] = None
while start < end:
lowerCAmelCase : Tuple = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowerCAmelCase : Dict = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowerCamelCase__ )
lowerCAmelCase : List[Any] = end
return sub_tokens
class __magic_name__ ( snake_case ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
_lowerCAmelCase = False
def __init__( self : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str="<d>" , lowerCamelCase__ : Any="</d>" , lowerCamelCase__ : List[Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Optional[Any]="<pad>" , lowerCamelCase__ : Any="<unk>" , lowerCamelCase__ : Union[str, Any]="</n>" , lowerCamelCase__ : Dict="</_>" , lowerCamelCase__ : Optional[int]="left" , **lowerCamelCase__ : Optional[int] , ):
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=lowerCamelCase__ , eod_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , line_token=lowerCamelCase__ , space_token=lowerCamelCase__ , padding_side=lowerCamelCase__ , **lowerCamelCase__ , )
lowerCAmelCase : Tuple = bod_token
lowerCAmelCase : Tuple = eod_token
lowerCAmelCase : Union[str, Any] = load_vocab(lowerCamelCase__ )
lowerCAmelCase : Any = self.encoder[space_token]
lowerCAmelCase : Any = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowerCAmelCase : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) )
lowerCAmelCase : Optional[int] = {v: k for k, v in self.encoder.items()}
lowerCAmelCase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _A ( self : Optional[Any] ):
return self.encoder[self.bod_token]
@property
def _A ( self : int ):
return self.encoder[self.eod_token]
@property
def _A ( self : int ):
return self.encoder["\n"]
@property
def _A ( self : Any ):
return len(self.encoder )
def _A ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def _A ( self : str , lowerCamelCase__ : int ):
lowerCAmelCase : Optional[Any] = []
for x in jieba.cut(lowerCamelCase__ , cut_all=lowerCamelCase__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase__ ) )
return output_tokens
def _A ( self : int , lowerCamelCase__ : Tuple , **lowerCamelCase__ : Any ):
lowerCAmelCase : List[Any] = [i for i in token_ids if i >= 0]
lowerCAmelCase : Union[str, Any] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase__ , **lowerCamelCase__ )
def _A ( self : Optional[int] , lowerCamelCase__ : Any ):
return token in self.encoder
def _A ( self : Optional[Any] , lowerCamelCase__ : List[str] ):
return "".join(lowerCamelCase__ )
def _A ( self : Tuple , lowerCamelCase__ : Optional[Any] ):
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def _A ( self : Union[str, Any] , lowerCamelCase__ : Optional[Any] ):
return self.decoder.get(lowerCamelCase__ , self.unk_token )
def _A ( self : int , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
if os.path.isdir(lowerCamelCase__ ):
lowerCAmelCase : List[Any] = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowerCAmelCase : str = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowerCAmelCase : Tuple = 0
if " " in self.encoder:
lowerCAmelCase : Optional[int] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowerCAmelCase : List[Any] = self.encoder['''\n''']
del self.encoder["\n"]
lowerCAmelCase : int = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
''' Please check that the vocabulary is not corrupted!''' )
lowerCAmelCase : int = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def _A ( self : Any , lowerCamelCase__ : List[int] , lowerCamelCase__ : List[int] = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _A ( self : str , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ ))
return [1] + ([0] * len(lowerCamelCase__ ))
| 348
| 0
|
from __future__ import annotations
def snake_case (UpperCamelCase : str , UpperCamelCase : list[str] | None = None , UpperCamelCase : dict[str, float] | None = None , UpperCamelCase : bool = False , ):
'''simple docstring'''
lowerCamelCase__ = cipher_alphabet or [chr(UpperCamelCase ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
lowerCamelCase__ = {
"""a""": 0.0_8_4_9_7,
"""b""": 0.0_1_4_9_2,
"""c""": 0.0_2_2_0_2,
"""d""": 0.0_4_2_5_3,
"""e""": 0.1_1_1_6_2,
"""f""": 0.0_2_2_2_8,
"""g""": 0.0_2_0_1_5,
"""h""": 0.0_6_0_9_4,
"""i""": 0.0_7_5_4_6,
"""j""": 0.0_0_1_5_3,
"""k""": 0.0_1_2_9_2,
"""l""": 0.0_4_0_2_5,
"""m""": 0.0_2_4_0_6,
"""n""": 0.0_6_7_4_9,
"""o""": 0.0_7_5_0_7,
"""p""": 0.0_1_9_2_9,
"""q""": 0.0_0_0_9_5,
"""r""": 0.0_7_5_8_7,
"""s""": 0.0_6_3_2_7,
"""t""": 0.0_9_3_5_6,
"""u""": 0.0_2_7_5_8,
"""v""": 0.0_0_9_7_8,
"""w""": 0.0_2_5_6_0,
"""x""": 0.0_0_1_5_0,
"""y""": 0.0_1_9_9_4,
"""z""": 0.0_0_0_7_7,
}
else:
# Custom frequencies dictionary
lowerCamelCase__ = frequencies_dict
if not case_sensitive:
lowerCamelCase__ = ciphertext.lower()
# Chi squared statistic values
lowerCamelCase__ = {}
# cycle through all of the shifts
for shift in range(len(UpperCamelCase ) ):
lowerCamelCase__ = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
lowerCamelCase__ = (alphabet_letters.index(letter.lower() ) - shift) % len(
UpperCamelCase )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
lowerCamelCase__ = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
lowerCamelCase__ = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase__ = decrypted_with_shift.lower().count(UpperCamelCase )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCamelCase__ = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase__ = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase__ = decrypted_with_shift.count(UpperCamelCase )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCamelCase__ = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase__ = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
lowerCamelCase__ = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(UpperCamelCase : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
lowerCamelCase__ = min(
UpperCamelCase , key=UpperCamelCase , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
| 235
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def snake_case (UpperCamelCase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ = FileLock(str(tmpdir / """foo.lock""" ) )
lowerCamelCase__ = FileLock(str(tmpdir / """foo.lock""" ) )
lowerCamelCase__ = 0.0_1
with locka.acquire():
with pytest.raises(UpperCamelCase ):
lowerCamelCase__ = time.time()
locka.acquire(UpperCamelCase )
assert time.time() - _start > timeout
def snake_case (UpperCamelCase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ = """a""" * 1000 + """.lock"""
lowerCamelCase__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCamelCase )
assert len(os.path.basename(locka._lock_file ) ) <= 255
lowerCamelCase__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCamelCase ):
locka.acquire(0 )
| 235
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
__lowercase : List[Any] = logging.getLogger(__name__)
__lowercase : Dict = {"facebook/bart-base": BartForConditionalGeneration}
__lowercase : Optional[Any] = {"facebook/bart-base": BartTokenizer}
def SCREAMING_SNAKE_CASE ( ):
__snake_case = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''')
parser.add_argument(
'''--validation_file''', type=snake_case, default=snake_case, help='''A csv or a json file containing the validation data.''')
parser.add_argument(
'''--max_length''', type=snake_case, default=5, help='''The maximum total input sequence length after tokenization.''', )
parser.add_argument(
'''--num_beams''', type=snake_case, default=snake_case, help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
), )
parser.add_argument(
'''--model_name_or_path''', type=snake_case, help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=snake_case, )
parser.add_argument(
'''--config_name''', type=snake_case, default=snake_case, help='''Pretrained config name or path if not the same as model_name''', )
parser.add_argument(
'''--device''', type=snake_case, default='''cpu''', help='''Device where the model will be run''', )
parser.add_argument('''--output_file_path''', type=snake_case, default=snake_case, help='''Where to store the final ONNX file.''')
__snake_case = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE ( snake_case, snake_case="cpu"):
__snake_case = model_dict[model_name].from_pretrained(snake_case).to(snake_case)
__snake_case = tokenizer_dict[model_name].from_pretrained(snake_case)
if model_name in ["facebook/bart-base"]:
__snake_case = 0
__snake_case = None
__snake_case = 0
return huggingface_model, tokenizer
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case, snake_case):
model.eval()
__snake_case = None
__snake_case = torch.jit.script(BARTBeamSearchGenerator(snake_case))
with torch.no_grad():
__snake_case = '''My friends are cool but they eat too many carbs.'''
__snake_case = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=10_24, return_tensors='''pt''').to(model.device)
__snake_case = model.generate(
inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], num_beams=snake_case, max_length=snake_case, early_stopping=snake_case, decoder_start_token_id=model.config.decoder_start_token_id, )
torch.onnx.export(
snake_case, (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
), snake_case, opset_version=14, input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''], output_names=['''output_ids'''], dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
}, example_outputs=snake_case, )
logger.info('''Model exported to {}'''.format(snake_case))
__snake_case = remove_dup_initializers(os.path.abspath(snake_case))
logger.info('''Deduplicated and optimized model written to {}'''.format(snake_case))
__snake_case = onnxruntime.InferenceSession(snake_case)
__snake_case = ort_sess.run(
snake_case, {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(snake_case),
'''max_length''': np.array(snake_case),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id),
}, )
np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1E-3, atol=1E-3)
logger.info('''Model outputs from torch and ONNX Runtime are similar.''')
logger.info('''Success.''')
def SCREAMING_SNAKE_CASE ( ):
__snake_case = parse_args()
__snake_case = 5
__snake_case = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
logger.setLevel(logging.INFO)
transformers.utils.logging.set_verbosity_error()
__snake_case = torch.device(args.device)
__snake_case , __snake_case = load_model_tokenizer(args.model_name_or_path, snake_case)
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''')
model.to(snake_case)
if args.max_length:
__snake_case = args.max_length
if args.num_beams:
__snake_case = args.num_beams
if args.output_file_path:
__snake_case = args.output_file_path
else:
__snake_case = '''BART.onnx'''
logger.info('''Exporting model to ONNX''')
export_and_validate_model(snake_case, snake_case, snake_case, snake_case, snake_case)
if __name__ == "__main__":
main()
| 564
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase : Union[str, Any] = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 564
| 1
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowercase_ ( _lowerCamelCase: Dict , _lowerCamelCase: List[Any] ) -> Tuple:
'''simple docstring'''
__lowerCamelCase : str = torch.load(_lowercase , map_location="cpu" )
__lowerCamelCase : Union[str, Any] = chkpt['model']
# We have the base model one level deeper than the original XLM repository
__lowerCamelCase : Union[str, Any] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__lowerCamelCase : int = v
else:
__lowerCamelCase : Tuple = v
__lowerCamelCase : Tuple = chkpt['params']
__lowerCamelCase : Any = {n: v for n, v in config.items() if not isinstance(_lowercase , (torch.FloatTensor, numpy.ndarray) )}
__lowerCamelCase : int = chkpt['dico_word2id']
__lowerCamelCase : Any = {s + '</w>' if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
# Save pytorch-model
__lowerCamelCase : int = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__lowerCamelCase : Union[str, Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
__lowerCamelCase : str = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(_lowercase , _lowercase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_lowercase , indent=2 ) + "\n" )
print(F"""Save vocab file to {pytorch_config_dump_path}""" )
with open(_lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_lowercase , indent=2 ) + "\n" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__A = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
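# Example invocation (hypothetical paths):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-pytorch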
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__A = 4
__A = 3
class _snake_case ( a__ ):
pass
def lowercase_ ( _lowerCamelCase: List[str] ) -> List[str]:
'''simple docstring'''
for shard in shards:
for i in range(_lowerCamelCase ):
yield {"i": i, "shard": shard}
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : List[Any] = int(os.environ["RANK"] )
__lowerCamelCase : Optional[int] = int(os.environ["WORLD_SIZE"] )
__lowerCamelCase : Any = ArgumentParser()
parser.add_argument("--streaming" , type=_lowerCamelCase )
parser.add_argument("--local_rank" , type=_lowerCamelCase )
parser.add_argument("--num_workers" , type=_lowerCamelCase , default=0 )
__lowerCamelCase : Dict = parser.parse_args()
__lowerCamelCase : str = args.streaming
__lowerCamelCase : List[Any] = args.num_workers
__lowerCamelCase : Optional[Any] = {"shards": [F"""shard_{shard_idx}""" for shard_idx in range(_lowerCamelCase )]}
__lowerCamelCase : int = IterableDataset.from_generator(_lowerCamelCase , gen_kwargs=_lowerCamelCase )
if not streaming:
__lowerCamelCase : Optional[int] = Dataset.from_list(list(_lowerCamelCase ) )
__lowerCamelCase : Union[str, Any] = split_dataset_by_node(_lowerCamelCase , rank=_lowerCamelCase , world_size=_lowerCamelCase )
__lowerCamelCase : Optional[Any] = torch.utils.data.DataLoader(_lowerCamelCase , num_workers=_lowerCamelCase )
__lowerCamelCase : List[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
__lowerCamelCase : Optional[Any] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
__lowerCamelCase : Optional[Any] = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
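# Example launch (illustrative; the script name is a placeholder). torchrun
# sets the RANK and WORLD_SIZE environment variables this script reads:
#
#   torchrun --nproc_per_node=2 run_torch_distributed.py --streaming True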
"""Equivalent resistance of resistors in parallel and in series."""

from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance 1 / (1/R1 + 1/R2 + ... + 1/Rn)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance R1 + R2 + ... + Rn."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
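# Worked example (values chosen for illustration):
#   resistor_parallel([3.21389, 2, 3])  ->  1 / (1/3.21389 + 1/2 + 1/3) ~= 0.8738
#   resistor_series([3.21389, 2, 3])    ->  3.21389 + 2 + 3 = 8.21389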
def match_pattern(input_string: str, pattern: str) -> bool:
    """Regex-style matching that supports '.' (any single character) and
    '*' (zero or more of the preceding element), via bottom-up dynamic programming."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for j in range(len_pattern)] for i in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
"""Project Euler problem 12: the first triangle number with over five hundred divisors."""


def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization:
    if n = p1^a1 * ... * pk^ak, then d(n) = (a1 + 1) * ... * (ak + 1)."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
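# Worked example of count_divisors (illustrative): 28 = 2^2 * 7, so
# d(28) = (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28 -- making 28
# the first triangle number with more than five divisors.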
class lowerCamelCase__: # Public class to implement a graph
def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
__lowerCamelCase = row
__lowerCamelCase = col
__lowerCamelCase = graph
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
# Checking all 8 elements surrounding nth element
__lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
__lowerCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands.
__lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__lowerCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += 1
return count
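# Example usage (a small grid chosen for illustration; the 8-way connectivity
# above merges diagonally adjacent land cells, giving 5 islands here):
#
#   grid = [
#       [1, 1, 0, 0, 0],
#       [0, 1, 0, 0, 1],
#       [1, 0, 0, 1, 1],
#       [0, 0, 0, 0, 0],
#       [1, 0, 1, 0, 1],
#   ]
#   print(Graph(5, 5, grid).count_islands())  # -> 5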