code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
_lowercase : List[str] = False
_lowercase : Tuple = True
_lowercase : Any = False
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
_lowercase : Optional[Any] = parser.parse_args()
_lowercase : List[Any] = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
_lowercase : Optional[Any] = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
_lowercase : str = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
_lowercase : Union[str, Any] = reader.read()
_lowercase : Dict = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
_lowercase : Optional[Any] = UNetaDModel(**config)
else:
_lowercase : Tuple = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
_lowercase : Any = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
_lowercase : Dict = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
_lowercase : Optional[Any] = config[key]
del config[key]
_lowercase : Optional[Any] = [k.replace('UNetRes', '') for k in config['down_block_types']]
_lowercase : Union[str, Any] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
_lowercase : int = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
_lowercase : List[Any] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
_lowercase : Tuple = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
_lowercase : Dict = param_value
_lowercase : Union[str, Any] = True
if not has_changed:
_lowercase : int = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 332
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCAmelCase_ = [3, 3, 3, 3]
lowerCAmelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCAmelCase_ = [4, 4, 4, 4]
lowerCAmelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCAmelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCAmelCase_ = [3, 3, 3, 3]
else:
lowerCAmelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCAmelCase_ = 96
elif "small" in model_name:
lowerCAmelCase_ = 96
elif "base" in model_name:
lowerCAmelCase_ = 128
elif "large" in model_name:
lowerCAmelCase_ = 192
elif "xlarge" in model_name:
lowerCAmelCase_ = 256
elif "huge" in model_name:
lowerCAmelCase_ = 352
# set label information
lowerCAmelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCAmelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCAmelCase_ = '''imagenet-1k-id2label.json'''
lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ = FocalNetConfig(
embed_dim=_A , depths=_A , focal_levels=_A , focal_windows=_A , use_conv_embed=_A , idalabel=_A , labelaid=_A , use_post_layernorm=_A , use_layerscale=_A , )
return config
def __UpperCamelCase ( _A ):
if "patch_embed.proj" in name:
lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCAmelCase_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowerCAmelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCAmelCase_ = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowerCAmelCase_ = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowerCAmelCase_ = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCAmelCase_ = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCAmelCase_ = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCAmelCase_ = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowerCAmelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCAmelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCAmelCase_ = name.replace('''head''' , '''classifier''' )
else:
lowerCAmelCase_ = '''focalnet.''' + name
return name
def __UpperCamelCase ( _A , _A , _A=False ):
# fmt: off
lowerCAmelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCAmelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , _A )
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCAmelCase_ = state_dict.pop(_A )
lowerCAmelCase_ = val
lowerCAmelCase_ = get_focalnet_config(_A )
lowerCAmelCase_ = FocalNetForImageClassification(_A )
model.eval()
# load state dict
model.load_state_dict(_A )
# verify conversion
lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase_ = BitImageProcessor(
do_resize=_A , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_A , crop_size=224 , do_normalize=_A , image_mean=_A , image_std=_A , )
lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw )
lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' )
lowerCAmelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowerCAmelCase_ = image_transforms(_A ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , _A , atol=1E-4 )
lowerCAmelCase_ = model(**_A )
lowerCAmelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCAmelCase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowerCAmelCase_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowerCAmelCase_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowerCAmelCase_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowerCAmelCase_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowerCAmelCase_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
processor.save_pretrained(_A )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
_A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278
| 0
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def SCREAMING_SNAKE_CASE__ ( __a , __a = "cpu" , __a = None ):
snake_case_ : str = torch.load(__a , map_location=__a )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__a , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
snake_case_ : List[Any] = v.half()
if save_path is None: # overwrite src_path
snake_case_ : Union[str, Any] = src_path
torch.save(__a , __a )
if __name__ == "__main__":
fire.Fire(convert)
| 88
|
from __future__ import annotations
import pandas as pd
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ):
snake_case_ : Optional[Any] = [0] * no_of_processes
snake_case_ : Tuple = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__a ):
snake_case_ : Union[str, Any] = burst_time[i]
snake_case_ : Optional[Any] = 0
snake_case_ : Dict = 0
snake_case_ : Any = 9_99_99_99_99
snake_case_ : Tuple = 0
snake_case_ : List[Any] = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__a ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
snake_case_ : str = remaining_time[j]
snake_case_ : Any = j
snake_case_ : List[str] = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
snake_case_ : Any = remaining_time[short]
if minm == 0:
snake_case_ : Dict = 9_99_99_99_99
if remaining_time[short] == 0:
complete += 1
snake_case_ : List[str] = False
# Find finish time of current process
snake_case_ : List[str] = increment_time + 1
# Calculate waiting time
snake_case_ : Any = finish_time - arrival_time[short]
snake_case_ : Any = finar - burst_time[short]
if waiting_time[short] < 0:
snake_case_ : Optional[int] = 0
# Increment time
increment_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ):
snake_case_ : Tuple = [0] * no_of_processes
for i in range(__a ):
snake_case_ : str = burst_time[i] + waiting_time[i]
return turn_around_time
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ):
snake_case_ : int = 0
snake_case_ : Optional[Any] = 0
for i in range(__a ):
snake_case_ : int = total_waiting_time + waiting_time[i]
snake_case_ : Optional[Any] = total_turn_around_time + turn_around_time[i]
print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print('Average turn around time =' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
_SCREAMING_SNAKE_CASE = int(input())
_SCREAMING_SNAKE_CASE = [0] * no_of_processes
_SCREAMING_SNAKE_CASE = [0] * no_of_processes
_SCREAMING_SNAKE_CASE = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = map(int, input().split())
_SCREAMING_SNAKE_CASE = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_SCREAMING_SNAKE_CASE = burst_time
_SCREAMING_SNAKE_CASE = no_of_processes
_SCREAMING_SNAKE_CASE = waiting_time
_SCREAMING_SNAKE_CASE = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
_SCREAMING_SNAKE_CASE = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
| 88
| 1
|
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : int ,A_ : int ) -> Union[str, Any]:
A = n
A = [None] * self.n
A = 0 # index of the first element
A = 0
A = 0
def __len__( self : int ) -> int:
return self.size
def _SCREAMING_SNAKE_CASE ( self : Any ) -> bool:
return self.size == 0
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
return False if self.is_empty() else self.array[self.front]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> int:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
A = data
A = (self.rear + 1) % self.n
self.size += 1
return self
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
if self.size == 0:
raise Exception('UNDERFLOW' )
A = self.array[self.front]
A = None
A = (self.front + 1) % self.n
self.size -= 1
return temp
| 74
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''deit'''
def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = encoder_stride
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: int = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float:
return 1e-4
| 74
| 1
|
"""simple docstring"""
import re
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> list:
'''simple docstring'''
return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" , str_ )]
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
lowercase = split_input(str_ )
return "".join(
["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :bool , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
try:
lowercase = split_input(lowerCAmelCase__ )
if upper:
lowercase = """""".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
lowercase = """""".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
return to_simple_case(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
try:
lowercase = to_simple_case(lowerCAmelCase__ )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :bool ) -> str:
'''simple docstring'''
return to_complex_case(lowerCAmelCase__ , lowerCAmelCase__ , """_""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :bool ) -> str:
'''simple docstring'''
return to_complex_case(lowerCAmelCase__ , lowerCAmelCase__ , """-""" )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 32
|
"""simple docstring"""
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
lowercase = None
lowercase = graph
self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase )
lowercase = len(__lowerCAmelCase )
lowercase = None
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if sources is int:
lowercase = [sources]
if sinks is int:
lowercase = [sinks]
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
return
lowercase = sources[0]
lowercase = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1:
lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowercase = max_input_flow
lowercase = 0
lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowercase = max_input_flow
lowercase = size - 1
def A__ ( self ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = algorithm(self )
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = flow_network
lowercase = flow_network.verticesCount
lowercase = flow_network.sourceIndex
lowercase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
lowercase = flow_network.graph
lowercase = False
def A__ ( self ):
"""simple docstring"""
if not self.executed:
self._algorithm()
lowercase = True
def A__ ( self ):
"""simple docstring"""
pass
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
# use this to save your result
lowercase = -1
def A__ ( self ):
"""simple docstring"""
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowercase = [0] * self.verticies_count
lowercase = [0] * self.verticies_count
def A__ ( self ):
"""simple docstring"""
lowercase = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowercase = 0
while i < len(__lowerCAmelCase ):
lowercase = vertices_list[i]
lowercase = self.heights[vertex_index]
self.process_vertex(__lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) )
lowercase = 0
else:
i += 1
lowercase = sum(self.preflow[self.source_index] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__lowerCAmelCase , __lowerCAmelCase )
self.relabel(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowercase = self.heights[to_index]
if min_height is not None:
lowercase = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : int =[0]
__lowerCAmelCase : List[Any] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 32
| 1
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
lowerCAmelCase : Dict =re.compile(r'''([A-Z]+)([A-Z][a-z])''')
lowerCAmelCase : int =re.compile(r'''([a-z\d])([A-Z])''')
lowerCAmelCase : List[str] =re.compile(r'''(?<!_)_(?!_)''')
lowerCAmelCase : Optional[Any] =re.compile(r'''(_{2,})''')
lowerCAmelCase : Union[str, Any] =r"^\w+(\.\w+)*$"
lowerCAmelCase : Optional[int] =r"<>:/\|?*"
def UpperCAmelCase_ ( __lowerCamelCase : List[Any] ):
lowercase_ :Any = _uppercase_uppercase_re.sub(r"\1_\2" ,__lowerCAmelCase )
lowercase_ :Optional[Any] = _lowercase_uppercase_re.sub(r"\1_\2" ,__lowerCAmelCase )
return name.lower()
def UpperCAmelCase_ ( __lowerCamelCase : Tuple ):
lowercase_ :int = _single_underscore_re.split(__lowerCAmelCase )
lowercase_ :List[Any] = [_multiple_underscores_re.split(__lowerCAmelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(__lowerCAmelCase ) if n != "" )
def UpperCAmelCase_ ( __lowerCamelCase : Optional[int] ):
if os.path.basename(__lowerCAmelCase ) != name:
raise ValueError(F'Should be a dataset name, not a path: {name}' )
return camelcase_to_snakecase(__lowerCAmelCase )
def UpperCAmelCase_ ( __lowerCamelCase : Tuple ,__lowerCamelCase : str ):
if os.path.basename(__lowerCAmelCase ) != name:
raise ValueError(F'Should be a dataset name, not a path: {name}' )
if not re.match(_split_re ,__lowerCAmelCase ):
raise ValueError(F'Split name should match \'{_split_re}\'\' but got \'{split}\'.' )
return F'{filename_prefix_for_name(__lowerCAmelCase )}-{split}'
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : List[Any]=None ):
lowercase_ :List[str] = filename_prefix_for_split(__lowerCAmelCase ,__lowerCAmelCase )
if filetype_suffix:
prefix += F'.{filetype_suffix}'
lowercase_ :List[str] = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
return F'{filepath}*'
def UpperCAmelCase_ ( __lowerCamelCase : Optional[int] ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : Tuple=None ,__lowerCamelCase : Union[str, Any]=None ):
lowercase_ :Tuple = filename_prefix_for_split(__lowerCAmelCase ,__lowerCAmelCase )
lowercase_ :str = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
if shard_lengths:
lowercase_ :Optional[int] = len(__lowerCAmelCase )
lowercase_ :Any = [F'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(__lowerCAmelCase )]
if filetype_suffix:
lowercase_ :str = [filename + F'.{filetype_suffix}' for filename in filenames]
return filenames
else:
lowercase_ :int = prefix
if filetype_suffix:
filename += F'.{filetype_suffix}'
return [filename]
| 223
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Dict = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowercase ):
a__ : Any = """mask2former"""
a__ : Dict = ["""swin"""]
a__ : Any = {"""hidden_size""": """hidden_dim"""}
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 10_24 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 6 , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 20_48 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 2_55 , SCREAMING_SNAKE_CASE__ : int = 1_00 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 2.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 1_25_44 , SCREAMING_SNAKE_CASE__ : float = 3.0 , SCREAMING_SNAKE_CASE__ : float = 0.75 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 16, 32] , SCREAMING_SNAKE_CASE__ : bool = None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> str:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
__lowerCamelCase = CONFIG_MAPPING['''swin'''](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = backbone_config.pop('''model_type''' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE__ )
@classmethod
def __A ( cls : Any , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
return cls(
backbone_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __A ( self : Any ) -> Dict[str, any]:
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
| 270
| 0
|
class SCREAMING_SNAKE_CASE__ :
def __init__( self , a):
lowercase__ : str = size
lowercase__ : Union[str, Any] = [0] * size
lowercase__ : Any = [0] * size
@staticmethod
def snake_case_ ( a):
return index | (index + 1)
@staticmethod
def snake_case_ ( a):
return (index & (index + 1)) - 1
def snake_case_ ( self , a , a):
lowercase__ : List[Any] = value
while index < self.size:
lowercase__ : Tuple = self.get_prev(a) + 1
if current_left_border == index:
lowercase__ : int = value
else:
lowercase__ : Tuple = max(a , a , a)
lowercase__ : str = self.get_next(a)
def snake_case_ ( self , a , a):
right -= 1 # Because of right is exclusive
lowercase__ : Optional[int] = 0
while left <= right:
lowercase__ : int = self.get_prev(a)
if left <= current_left:
lowercase__ : str = max(a , self.tree[right])
lowercase__ : Optional[Any] = current_left
else:
lowercase__ : List[Any] = max(a , self.arr[right])
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case_ = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 216
| 1
|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
snake_case : Union[str, Any] = logging.getLogger()
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
__magic_name__ : Tuple = argparse.ArgumentParser()
parser.add_argument("-f" )
__magic_name__ : Any = parser.parse_args()
return args.f
def lowerCAmelCase_ ( _snake_case : Any ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : str = {}
__magic_name__ : List[Any] = os.path.join(_snake_case , "all_results.json" )
if os.path.exists(_snake_case ):
with open(_snake_case , "r" ) as f:
__magic_name__ : Optional[Any] = json.load(_snake_case )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def lowerCAmelCase_ ( ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : List[str] = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
snake_case : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _snake_case ( snake_case ):
@classmethod
def SCREAMING_SNAKE_CASE ( cls ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
__magic_name__ : Any = tempfile.mkdtemp()
__magic_name__ : List[Any] = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
__magic_name__ : Any = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def SCREAMING_SNAKE_CASE ( cls ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
__magic_name__ : str = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__magic_name__ : Optional[Any] = get_results(_a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : int = self.get_auto_remove_tmp_dir()
__magic_name__ : List[str] = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__magic_name__ : Union[str, Any] = get_results(_a )
self.assertLess(result["perplexity"] , 100 )
self.assertTrue(os.path.exists(os.path.join(_a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = self.get_auto_remove_tmp_dir()
__magic_name__ : Dict = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__magic_name__ : Tuple = get_results(_a )
self.assertLess(result["perplexity"] , 42 )
self.assertTrue(os.path.exists(os.path.join(_a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def SCREAMING_SNAKE_CASE ( self ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__magic_name__ : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
__magic_name__ : Optional[Any] = self.get_auto_remove_tmp_dir()
__magic_name__ : Union[str, Any] = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__magic_name__ : List[Any] = get_results(_a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = self.get_auto_remove_tmp_dir()
__magic_name__ : List[Any] = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__magic_name__ : Optional[Any] = get_results(_a )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 28 )
self.assertGreaterEqual(result["eval_exact"] , 28 )
self.assertTrue(os.path.exists(os.path.join(_a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = self.get_auto_remove_tmp_dir()
__magic_name__ : str = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__magic_name__ : Tuple = get_results(_a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_a , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = self.get_auto_remove_tmp_dir()
__magic_name__ : Tuple = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__magic_name__ : List[Any] = get_results(_a )
self.assertGreaterEqual(result["eval_rouge1"] , 10 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(_a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = self.get_auto_remove_tmp_dir()
__magic_name__ : List[str] = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__magic_name__ : Dict = get_results(_a )
self.assertGreaterEqual(result["eval_bleu"] , 30 )
self.assertTrue(os.path.exists(os.path.join(_a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a , "translation_no_trainer" ) ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = logging.StreamHandler(sys.stdout )
logger.addHandler(_a )
__magic_name__ : str = self.get_auto_remove_tmp_dir()
__magic_name__ : int = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
__magic_name__ : Union[str, Any] = get_results(_a )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = self.get_auto_remove_tmp_dir()
__magic_name__ : Tuple = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__magic_name__ : List[str] = get_results(_a )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_a , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a , "image_classification_no_trainer" ) ) )
| 281
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCAmelCase_ ( _snake_case : Dict , _snake_case : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__magic_name__ : List[str] = np.argmax(_snake_case , axis=1 )
return np.sum(outputs == labels )
def lowerCAmelCase_ ( _snake_case : Optional[Any] ) -> Dict:
'''simple docstring'''
with open(_snake_case , encoding="utf_8" ) as f:
__magic_name__ : List[str] = csv.reader(_snake_case )
__magic_name__ : List[Any] = []
next(_snake_case ) # skip the first line
for line in tqdm(_snake_case ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCAmelCase_ ( _snake_case : str , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Optional[int] ) -> int:
'''simple docstring'''
__magic_name__ : Optional[int] = []
for dataset in encoded_datasets:
__magic_name__ : Union[str, Any] = len(_snake_case )
__magic_name__ : Dict = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
__magic_name__ : List[str] = np.zeros((n_batch, 2) , dtype=np.intaa )
__magic_name__ : Optional[int] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
__magic_name__ : int = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(_snake_case ):
__magic_name__ : Dict = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__magic_name__ : Dict = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__magic_name__ : str = with_conta
__magic_name__ : Tuple = with_conta
__magic_name__ : Union[str, Any] = len(_snake_case ) - 1
__magic_name__ : int = len(_snake_case ) - 1
__magic_name__ : Optional[Any] = with_conta
__magic_name__ : Optional[Any] = with_conta
__magic_name__ : Optional[int] = mc_label
__magic_name__ : str = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_snake_case ) for t in all_inputs ) )
return tensor_datasets
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
__magic_name__ : Any = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_snake_case , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=_snake_case , default="" )
parser.add_argument("--eval_dataset" , type=_snake_case , default="" )
parser.add_argument("--seed" , type=_snake_case , default=42 )
parser.add_argument("--num_train_epochs" , type=_snake_case , default=3 )
parser.add_argument("--train_batch_size" , type=_snake_case , default=8 )
parser.add_argument("--eval_batch_size" , type=_snake_case , default=16 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=_snake_case , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=_snake_case , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=_snake_case , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=_snake_case , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=_snake_case , default=6.25E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=_snake_case , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=_snake_case , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=_snake_case , default=0.01 )
parser.add_argument("--lm_coef" , type=_snake_case , default=0.9 )
parser.add_argument("--n_valid" , type=_snake_case , default=374 )
parser.add_argument("--server_ip" , type=_snake_case , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=_snake_case , default="" , help="Can be used for distant debugging." )
__magic_name__ : List[Any] = parser.parse_args()
print(_snake_case )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_snake_case )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__magic_name__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
__magic_name__ : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(_snake_case , _snake_case ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__magic_name__ : List[Any] = ["_start_", "_delimiter_", "_classify_"]
__magic_name__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_snake_case )
__magic_name__ : Optional[Any] = tokenizer.convert_tokens_to_ids(_snake_case )
__magic_name__ : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_snake_case ) )
model.to(_snake_case )
# Load and encode the datasets
def tokenize_and_encode(_snake_case : str ):
if isinstance(_snake_case , _snake_case ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_snake_case ) )
elif isinstance(_snake_case , _snake_case ):
return obj
return [tokenize_and_encode(_snake_case ) for o in obj]
logger.info("Encoding dataset..." )
__magic_name__ : Optional[int] = load_rocstories_dataset(args.train_dataset )
__magic_name__ : str = load_rocstories_dataset(args.eval_dataset )
__magic_name__ : int = (train_dataset, eval_dataset)
__magic_name__ : List[str] = tokenize_and_encode(_snake_case )
# Compute the max input length for the Transformer
__magic_name__ : Optional[Any] = model.config.n_positions // 2 - 2
__magic_name__ : Optional[int] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__magic_name__ : List[str] = min(_snake_case , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__magic_name__ : List[Any] = pre_process_datasets(_snake_case , _snake_case , _snake_case , *_snake_case )
__magic_name__ , __magic_name__ : Optional[int] = tensor_datasets[0], tensor_datasets[1]
__magic_name__ : Tuple = TensorDataset(*_snake_case )
__magic_name__ : Union[str, Any] = RandomSampler(_snake_case )
__magic_name__ : Dict = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.train_batch_size )
__magic_name__ : Any = TensorDataset(*_snake_case )
__magic_name__ : Optional[Any] = SequentialSampler(_snake_case )
__magic_name__ : int = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__magic_name__ : Tuple = args.max_steps
__magic_name__ : List[str] = args.max_steps // (len(_snake_case ) // args.gradient_accumulation_steps) + 1
else:
__magic_name__ : List[str] = len(_snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs
__magic_name__ : str = list(model.named_parameters() )
__magic_name__ : Dict = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
__magic_name__ : str = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
__magic_name__ : str = AdamW(_snake_case , lr=args.learning_rate , eps=args.adam_epsilon )
__magic_name__ : List[str] = get_linear_schedule_with_warmup(
_snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=_snake_case )
if args.do_train:
__magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
__magic_name__ : List[str] = 0
__magic_name__ : Tuple = 0
__magic_name__ : Dict = tqdm(_snake_case , desc="Training" )
for step, batch in enumerate(_snake_case ):
__magic_name__ : Optional[Any] = tuple(t.to(_snake_case ) for t in batch )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = batch
__magic_name__ : Optional[Any] = model(_snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
__magic_name__ : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__magic_name__ : List[str] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__magic_name__ : int = "Training loss: {:.2e} lr: {:.2e}".format(_snake_case , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__magic_name__ : Dict = model.module if hasattr(_snake_case , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__magic_name__ : List[Any] = os.path.join(args.output_dir , _snake_case )
__magic_name__ : Dict = os.path.join(args.output_dir , _snake_case )
torch.save(model_to_save.state_dict() , _snake_case )
model_to_save.config.to_json_file(_snake_case )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__magic_name__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__magic_name__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_snake_case )
if args.do_eval:
model.eval()
__magic_name__ , __magic_name__ : Any = 0, 0
__magic_name__ , __magic_name__ : Union[str, Any] = 0, 0
for batch in tqdm(_snake_case , desc="Evaluating" ):
__magic_name__ : int = tuple(t.to(_snake_case ) for t in batch )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = batch
with torch.no_grad():
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = model(
_snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
__magic_name__ : Tuple = mc_logits.detach().cpu().numpy()
__magic_name__ : Any = mc_labels.to("cpu" ).numpy()
__magic_name__ : str = accuracy(_snake_case , _snake_case )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__magic_name__ : Tuple = eval_loss / nb_eval_steps
__magic_name__ : List[Any] = eval_accuracy / nb_eval_examples
__magic_name__ : int = tr_loss / nb_tr_steps if args.do_train else None
__magic_name__ : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
__magic_name__ : int = os.path.join(args.output_dir , "eval_results.txt" )
with open(_snake_case , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , _snake_case , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 281
| 1
|
"""simple docstring"""
import sys
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = len(a__ )
A = [[0 for x in range(a__ )] for x in range(a__ )]
A = [[0 for x in range(a__ )] for x in range(a__ )]
for chain_length in range(2 , a__ ):
for a in range(1 , n - chain_length + 1 ):
A = a + chain_length - 1
A = sys.maxsize
for c in range(a__ , a__ ):
A = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
A = cost
A = c
return matrix, sol
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
if i == j:
print("A" + str(a__ ) , end=" " )
else:
print("(" , end=" " )
print_optiomal_solution(a__ , a__ , optimal_solution[i][j] )
print_optiomal_solution(a__ , optimal_solution[i][j] + 1 , a__ )
print(")" , end=" " )
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
A = [30, 35, 15, 5, 10, 20, 25]
A = len(a__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
A , A = matrix_chain_order(a__ )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(a__ , 1 , n - 1 )
if __name__ == "__main__":
main()
| 361
|
"""simple docstring"""
from __future__ import annotations
class __UpperCamelCase :
def __init__(self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str):
A , A = text, pattern
A , A = len(__SCREAMING_SNAKE_CASE), len(__SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str):
for i in range(self.patLen - 1 , -1 , -1):
if char == self.pattern[i]:
return i
return -1
def SCREAMING_SNAKE_CASE__ (self : str , __SCREAMING_SNAKE_CASE : int):
for i in range(self.patLen - 1 , -1 , -1):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def SCREAMING_SNAKE_CASE__ (self : List[Any]):
# searches pattern in text and returns index positions
A = []
for i in range(self.textLen - self.patLen + 1):
A = self.mismatch_in_text(__SCREAMING_SNAKE_CASE)
if mismatch_index == -1:
positions.append(__SCREAMING_SNAKE_CASE)
else:
A = self.match_in_pattern(self.text[mismatch_index])
A = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__A : int = 'ABAABA'
__A : Optional[Any] = 'AB'
__A : Any = BoyerMooreSearch(text, pattern)
__A : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 57
| 0
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__lowerCAmelCase : str = logging.get_logger(__name__)
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : str = None , UpperCamelCase__ : uuid.UUID = None , UpperCamelCase__ : Any=None , UpperCamelCase__ : List[str]=None ) -> List[str]:
"""simple docstring"""
if not conversation_id:
__magic_name__ = uuid.uuida()
if past_user_inputs is None:
__magic_name__ = []
if generated_responses is None:
__magic_name__ = []
__magic_name__ = conversation_id
__magic_name__ = past_user_inputs
__magic_name__ = generated_responses
__magic_name__ = text
def __eq__( self : Union[str, Any] , UpperCamelCase__ : List[Any] ) -> List[str]:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _lowercase ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : bool = False ) -> Tuple:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
__magic_name__ = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
__magic_name__ = text
def _lowercase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__magic_name__ = None
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> Optional[int]:
"""simple docstring"""
self.generated_responses.append(UpperCamelCase__ )
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__magic_name__ = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
__magic_name__ = """user""" if is_user else """bot"""
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
_A , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __init__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
if self.tokenizer.pad_token_id is None:
__magic_name__ = self.tokenizer.eos_token
def _lowercase ( self : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Tuple=None , **UpperCamelCase__ : Tuple ) -> Any:
"""simple docstring"""
__magic_name__ = {}
__magic_name__ = {}
__magic_name__ = {}
if min_length_for_response is not None:
__magic_name__ = min_length_for_response
if minimum_tokens is not None:
__magic_name__ = minimum_tokens
if "max_length" in generate_kwargs:
__magic_name__ = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__magic_name__ = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(UpperCamelCase__ )
return preprocess_params, forward_params, postprocess_params
def __call__( self : int , UpperCamelCase__ : Union[Conversation, List[Conversation]] , UpperCamelCase__ : Any=0 , **UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
__magic_name__ = super().__call__(UpperCamelCase__ , num_workers=UpperCamelCase__ , **UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) == 1:
return outputs[0]
return outputs
def _lowercase ( self : List[Any] , UpperCamelCase__ : Conversation , UpperCamelCase__ : Union[str, Any]=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__magic_name__ = self.tokenizer._build_conversation_input_ids(UpperCamelCase__ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__magic_name__ = self._legacy_parse_and_tokenize(UpperCamelCase__ )
if self.framework == "pt":
__magic_name__ = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__magic_name__ = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _lowercase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=10 , **UpperCamelCase__ : int ) -> Dict:
"""simple docstring"""
__magic_name__ = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__magic_name__ = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
__magic_name__ = max_length - minimum_tokens
__magic_name__ = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__magic_name__ = model_inputs["""attention_mask"""][:, -trim:]
__magic_name__ = model_inputs.pop("""conversation""" )
__magic_name__ = max_length
__magic_name__ = self.model.generate(**UpperCamelCase__ , **UpperCamelCase__ )
if self.model.config.is_encoder_decoder:
__magic_name__ = 1
else:
__magic_name__ = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=True ) -> str:
"""simple docstring"""
__magic_name__ = model_outputs["""output_ids"""]
__magic_name__ = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ , )
__magic_name__ = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(UpperCamelCase__ )
return conversation
def _lowercase ( self : List[Any] , UpperCamelCase__ : Conversation ) -> Dict:
"""simple docstring"""
__magic_name__ = self.tokenizer.eos_token_id
__magic_name__ = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
if len(UpperCamelCase__ ) > self.tokenizer.model_max_length:
__magic_name__ = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 88
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
a__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _lowercase ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Tuple:
"""simple docstring"""
__magic_name__ = TextaTextGenerationPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
return generator, ["Something to write", "Something else"]
def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = generator("""Something there""" )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": ANY(UpperCamelCase__ )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
__magic_name__ = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
] , )
__magic_name__ = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
] , )
with self.assertRaises(UpperCamelCase__ ):
generator(4 )
@require_torch
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
__magic_name__ = generator("""Something there""" , do_sample=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """"""}] )
__magic_name__ = 3
__magic_name__ = generator(
"""Something there""" , num_return_sequences=UpperCamelCase__ , num_beams=UpperCamelCase__ , )
__magic_name__ = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = generator("""This is a test""" , do_sample=UpperCamelCase__ , num_return_sequences=2 , return_tensors=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
__magic_name__ = generator.model.config.eos_token_id
__magic_name__ = """<pad>"""
__magic_name__ = generator(
["""This is a test""", """This is a second test"""] , do_sample=UpperCamelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase__ , )
self.assertEqual(
UpperCamelCase__ , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def _lowercase ( self : int ) -> str:
"""simple docstring"""
__magic_name__ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
__magic_name__ = generator("""Something there""" , do_sample=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """"""}] )
| 88
| 1
|
import os
import string
import sys
A_ : List[str] = 1 << 8
A_ : Any = {
"""tab""": ord('\t'),
"""newline""": ord('\r'),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
A_ : Any = KEYMAP["""up"""]
A_ : Tuple = KEYMAP["""left"""]
if sys.platform == "win32":
A_ : Tuple = []
A_ : str = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
A_ : str = ord(str(i))
def UpperCamelCase () -> Optional[Any]:
if os.name == "nt":
import msvcrt
A__ : Union[str, Any] = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(SCREAMING_SNAKE_CASE_ ) == 0:
# Read the keystroke
A__ : List[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
A__ : str = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
A__ : List[str] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(SCREAMING_SNAKE_CASE_ )
if ord(SCREAMING_SNAKE_CASE_ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
A__ : str = chr(KEYMAP["""esc"""] )
except KeyError:
A__ : str = cha[1]
else:
A__ : Union[str, Any] = ch.decode(SCREAMING_SNAKE_CASE_ )
else:
A__ : Any = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
A__ : List[str] = sys.stdin.fileno()
A__ : List[Any] = termios.tcgetattr(SCREAMING_SNAKE_CASE_ )
try:
tty.setraw(SCREAMING_SNAKE_CASE_ )
A__ : Dict = sys.stdin.read(1 )
finally:
termios.tcsetattr(SCREAMING_SNAKE_CASE_ , termios.TCSADRAIN , SCREAMING_SNAKE_CASE_ )
return ch
def UpperCamelCase () -> List[Any]:
A__ : List[str] = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["esc"]:
A__ : List[str] = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["mod_int"]:
A__ : Dict = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(SCREAMING_SNAKE_CASE_ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(SCREAMING_SNAKE_CASE_ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 366
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ : Optional[Any] = 16
A_ : Optional[int] = 32
def UpperCamelCase (lowercase_: Accelerator , lowercase_: int = 16 , lowercase_: str = "bert-base-cased" ) -> List[str]:
A__ : int = AutoTokenizer.from_pretrained(lowercase_ )
A__ : Union[str, Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase_: Tuple ):
# max_length=None => use the model max length (it's actually the default)
A__ : Optional[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase_ , max_length=lowercase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A__ : int = datasets.map(
lowercase_ , batched=lowercase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ : int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase_: Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase_ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase_ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
A__ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
A__ : Optional[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
return train_dataloader, eval_dataloader
def UpperCamelCase (lowercase_: Dict , lowercase_: Dict , lowercase_: Tuple , lowercase_: Optional[int] ) -> int:
model.eval()
A__ : str = 0
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ : Any = model(**lowercase_ )
A__ : List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A__ , A__ : str = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase_ ) - 1:
A__ : List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A__ : Dict = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase_ , references=lowercase_ , )
A__ : int = metric.compute()
return eval_metric["accuracy"]
def UpperCamelCase (lowercase_: List[Any] , lowercase_: str ) -> List[str]:
# Initialize accelerator
A__ : str = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : List[Any] = config["""lr"""]
A__ : Union[str, Any] = int(config["""num_epochs"""] )
A__ : List[Any] = int(config["""seed"""] )
A__ : Optional[Any] = int(config["""batch_size"""] )
A__ : Tuple = args.model_name_or_path
set_seed(lowercase_ )
A__ , A__ : Optional[Any] = get_dataloaders(lowercase_ , lowercase_ , lowercase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Tuple = AutoModelForSequenceClassification.from_pretrained(lowercase_ , return_dict=lowercase_ )
# Instantiate optimizer
A__ : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A__ : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=lowercase_ )
if accelerator.state.deepspeed_plugin is not None:
A__ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
A__ : Optional[int] = 1
A__ : Optional[int] = (len(lowercase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=lowercase_ , num_warmup_steps=0 , num_training_steps=lowercase_ , )
else:
A__ : int = DummyScheduler(lowercase_ , total_num_steps=lowercase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ : str = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# We need to keep track of how many total steps we have iterated over
A__ : Dict = 0
# We also need to keep track of the stating epoch so files are named properly
A__ : Any = 0
A__ : Optional[Any] = evaluate.load("""glue""" , """mrpc""" )
A__ : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
A__ : Tuple = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A__ : Dict = args.resume_from_checkpoint.split("""epoch_""" )[1]
A__ : int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A__ : Any = int(lowercase_ ) + 1
A__ : Any = evaluation_loop(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.print("""resumed checkpoint performance:""" , lowercase_ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
A__ : int = json.load(lowercase_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A__ : Optional[Any] = {}
for epoch in range(lowercase_ , lowercase_ ):
model.train()
for step, batch in enumerate(lowercase_ ):
A__ : int = model(**lowercase_ )
A__ : int = outputs.loss
A__ : int = loss / gradient_accumulation_steps
accelerator.backward(lowercase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A__ : Any = f"""epoch_{epoch}"""
A__ : int = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
A__ : List[Any] = evaluation_loop(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
A__ : Tuple = accuracy
A__ : Optional[Any] = lr_scheduler.get_lr()[0]
A__ : Tuple = optimizer.param_groups[0]["""lr"""]
A__ : int = epoch
A__ : int = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ )
def UpperCamelCase () -> int:
A__ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase_ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase_ , )
parser.add_argument(
"""--output_dir""" , type=lowercase_ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase_ , default=lowercase_ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase_ , default=lowercase_ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase_ , default=2 , help="""Number of train epochs.""" , )
A__ : List[str] = parser.parse_args()
A__ : List[str] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
| 141
| 0
|
# Imports
import numpy as np
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None ) -> Union[str, Any]:
self.set_matricies(red=SCREAMING_SNAKE_CASE__ , green=SCREAMING_SNAKE_CASE__ , blue=SCREAMING_SNAKE_CASE__ , red_edge=SCREAMING_SNAKE_CASE__ , nir=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : int=None ) -> List[Any]:
if red is not None:
a_ : int = red
if green is not None:
a_ : Any = green
if blue is not None:
a_ : int = blue
if red_edge is not None:
a_ : List[str] = red_edge
if nir is not None:
a_ : int = nir
return True
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]="" , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ) -> List[str]:
self.set_matricies(red=SCREAMING_SNAKE_CASE__ , green=SCREAMING_SNAKE_CASE__ , blue=SCREAMING_SNAKE_CASE__ , red_edge=SCREAMING_SNAKE_CASE__ , nir=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
return self.nir * (self.red / (self.green**2))
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
return (self.nir - self.red) / (self.nir + self.red)
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
return (self.nir - self.blue) / (self.nir + self.blue)
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
return (self.nir - self.green) / (self.nir + self.green)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : str=0.08 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1.22 , SCREAMING_SNAKE_CASE__ : List[Any]=0.03 ) -> List[Any]:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return (self.nir / self.green) - 1
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
return (self.nir / self.redEdge) - 1
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
return (self.red - self.blue) / self.red
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : List[Any] = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.nir - self.green
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
a_ : Union[str, Any] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str=0.16 ) -> int:
return (self.nir - self.green) / (self.nir + self.green + y)
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.5 ) -> Optional[Any]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> Union[str, Any]:
return (self.nir - b) / (a * self.red)
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
return (self.red + self.green + self.blue) / 30.5
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return self.nir / self.red
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return (self.rvi() - 1) / (self.rvi() + 1)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
return self.green / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
return self.nir / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return self.red / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
return (self.green - self.red) / (self.green + self.red)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
return (self.red - self.green) / (self.red + self.green)
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : List[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
a_ : Tuple = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
return self.nir / self.red
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
return (self.ndvi() + 0.5) ** (1 / 2)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 32
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
UpperCAmelCase_ : str = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
UpperCAmelCase_ : Dict = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
UpperCAmelCase_ : Optional[Any] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
UpperCAmelCase_ : List[str] = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
UpperCAmelCase_ : int = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
UpperCAmelCase_ : List[str] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
UpperCAmelCase_ : List[str] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
UpperCAmelCase_ : List[str] = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
UpperCAmelCase_ : Union[str, Any] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
UpperCAmelCase_ : Dict = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
UpperCAmelCase_ : List[str] = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
UpperCAmelCase_ : Dict = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
UpperCAmelCase_ : Union[str, Any] = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : List[Any] = FLAX_MODEL_MAPPING
UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModel)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : Optional[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : Tuple = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ : Optional[int] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : Tuple = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ : Optional[Any] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : Tuple = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ : str = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ : Tuple = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : Dict = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ : int = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ : Dict = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ : str = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : Optional[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
snake_case__ : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ : Union[str, Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 32
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase__ = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 354
|
"""simple docstring"""
from __future__ import annotations
import math
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : Dict , a_ : int ):
lowerCAmelCase_ : Union[str, Any] = size
# approximate the overall size of segment tree with given value
lowerCAmelCase_ : Union[str, Any] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
lowerCAmelCase_ : int = [0 for i in range(0 , 4 * size )]
lowerCAmelCase_ : Any = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowerCamelCase ( self : List[Any] , a_ : int ):
return idx * 2
def lowerCamelCase ( self : Tuple , a_ : int ):
return idx * 2 + 1
def lowerCamelCase ( self : Tuple , a_ : int , a_ : int , a_ : int , a_ : list[int] ):
if left_element == right_element:
lowerCAmelCase_ : Tuple = a[left_element - 1]
else:
lowerCAmelCase_ : Tuple = (left_element + right_element) // 2
self.build(self.left(a_ ) , a_ , a_ , a_ )
self.build(self.right(a_ ) , mid + 1 , a_ , a_ )
lowerCAmelCase_ : int = max(
self.segment_tree[self.left(a_ )] , self.segment_tree[self.right(a_ )] )
def lowerCamelCase ( self : Union[str, Any] , a_ : int , a_ : int , a_ : int , a_ : int , a_ : int , a_ : int ):
if self.flag[idx] is True:
lowerCAmelCase_ : Dict = self.lazy[idx]
lowerCAmelCase_ : Optional[Any] = False
if left_element != right_element:
lowerCAmelCase_ : str = self.lazy[idx]
lowerCAmelCase_ : Dict = self.lazy[idx]
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : Union[str, Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
lowerCAmelCase_ : Dict = val
if left_element != right_element:
lowerCAmelCase_ : Union[str, Any] = val
lowerCAmelCase_ : Dict = val
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : List[str] = True
return True
lowerCAmelCase_ : Optional[Any] = (left_element + right_element) // 2
self.update(self.left(a_ ) , a_ , a_ , a_ , a_ , a_ )
self.update(self.right(a_ ) , mid + 1 , a_ , a_ , a_ , a_ )
lowerCAmelCase_ : int = max(
self.segment_tree[self.left(a_ )] , self.segment_tree[self.right(a_ )] )
return True
def lowerCamelCase ( self : int , a_ : int , a_ : int , a_ : int , a_ : int , a_ : int ):
if self.flag[idx] is True:
lowerCAmelCase_ : Union[str, Any] = self.lazy[idx]
lowerCAmelCase_ : Optional[int] = False
if left_element != right_element:
lowerCAmelCase_ : int = self.lazy[idx]
lowerCAmelCase_ : int = self.lazy[idx]
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Dict = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
lowerCAmelCase_ : Any = (left_element + right_element) // 2
lowerCAmelCase_ : Union[str, Any] = self.query(self.left(a_ ) , a_ , a_ , a_ , a_ )
lowerCAmelCase_ : List[str] = self.query(self.right(a_ ) , mid + 1 , a_ , a_ , a_ )
return max(a_ , a_ )
def __str__( self : str ):
return str([self.query(1 , 1 , self.size , a_ , a_ ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
lowercase__ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
lowercase__ = 15
lowercase__ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 161
| 0
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowercase__ =logging.get_logger(__name__)
lowercase__ ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ ={
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
lowercase__ ={
'allenai/led-base-16384': 16384,
}
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : str = LEDTokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = ["input_ids", "attention_mask"]
def __init__(self : Tuple , snake_case_ : Optional[int]=None , snake_case_ : Any=None , snake_case_ : List[str]=None , snake_case_ : Union[str, Any]="replace" , snake_case_ : Any="<s>" , snake_case_ : str="</s>" , snake_case_ : List[str]="</s>" , snake_case_ : str="<s>" , snake_case_ : Tuple="<unk>" , snake_case_ : str="<pad>" , snake_case_ : Union[str, Any]="<mask>" , snake_case_ : Optional[Any]=False , snake_case_ : str=True , **snake_case_ : Dict , ):
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
__a : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , snake_case_ ) != add_prefix_space:
__a : str = getattr(snake_case_ , pre_tok_state.pop('''type''' ) )
__a : Optional[int] = add_prefix_space
__a : str = pre_tok_class(**snake_case_ )
__a : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__a : List[Any] = '''post_processor'''
__a : Optional[int] = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
__a : Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__a : Tuple = tuple(state['''sep'''] )
if "cls" in state:
__a : Optional[int] = tuple(state['''cls'''] )
__a : Union[str, Any] = False
if state.get('''add_prefix_space''' , snake_case_ ) != add_prefix_space:
__a : Optional[int] = add_prefix_space
__a : List[str] = True
if state.get('''trim_offsets''' , snake_case_ ) != trim_offsets:
__a : Tuple = trim_offsets
__a : List[str] = True
if changes_to_apply:
__a : Union[str, Any] = getattr(snake_case_ , state.pop('''type''' ) )
__a : Tuple = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase (self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase (self : Optional[Any] , snake_case_ : Any ):
__a : Dict = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
__a : Union[str, Any] = value
def lowerCAmelCase (self : Dict , *snake_case_ : Dict , **snake_case_ : Dict ):
__a : Any = kwargs.get('''is_split_into_words''' , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def lowerCAmelCase (self : List[Any] , *snake_case_ : Optional[int] , **snake_case_ : List[Any] ):
__a : Optional[int] = kwargs.get('''is_split_into_words''' , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def lowerCAmelCase (self : List[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ):
__a : str = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def lowerCAmelCase (self : str , snake_case_ : Dict , snake_case_ : Tuple=None ):
__a : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
__a : Tuple = [self.sep_token_id]
__a : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase (self : List[str] , snake_case_ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case_ : Optional[int] = None , snake_case_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , ):
__a : int = super()._pad(
encoded_inputs=snake_case_ , max_length=snake_case_ , padding_strategy=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , )
# Load from model defaults
if return_attention_mask is None:
__a : List[str] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__a : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__a : List[str] = len(encoded_inputs['''global_attention_mask'''] ) != len(snake_case_ )
if needs_to_be_padded:
__a : str = len(snake_case_ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__a : Tuple = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__a : Dict = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 216
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowercase__ =logging.get_logger(__name__)
lowercase__ ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ ={
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
lowercase__ ={
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
lowercase__ ={
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : Optional[int] = RoFormerTokenizer
def __init__(self : List[str] , snake_case_ : Optional[int]=None , snake_case_ : str=None , snake_case_ : Optional[Any]=True , snake_case_ : str="[UNK]" , snake_case_ : Dict="[SEP]" , snake_case_ : Any="[PAD]" , snake_case_ : str="[CLS]" , snake_case_ : List[Any]="[MASK]" , snake_case_ : Any=True , snake_case_ : List[str]=None , **snake_case_ : Optional[int] , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__a : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , snake_case_ ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , snake_case_ ) != strip_accents
):
__a : List[str] = getattr(snake_case_ , pre_tok_state.pop('''type''' ) )
__a : Optional[Any] = do_lower_case
__a : Optional[int] = strip_accents
__a : List[str] = pre_tok_class(**snake_case_ )
__a : Optional[Any] = do_lower_case
def __getstate__(self : Union[str, Any] ):
__a : Any = self.__dict__.copy()
__a : Union[str, Any] = BertPreTokenizer()
return state
def __setstate__(self : Tuple , snake_case_ : Optional[Any] ):
__a : Dict = d
__a : str = self.__dict__['''_tokenizer'''].get_vocab()
__a : Optional[Any] = PreTokenizer.custom(JiebaPreTokenizer(snake_case_ ) )
def lowerCAmelCase (self : Optional[int] , snake_case_ : List[Any] , snake_case_ : Optional[Any]=None ):
__a : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase (self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
__a : int = [self.sep_token_id]
__a : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase (self : int , snake_case_ : str , snake_case_ : Optional[str] = None ):
__a : Optional[Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def lowerCAmelCase (self : Dict , snake_case_ : Dict , snake_case_ : Tuple=None , snake_case_ : Optional[Any]=None , snake_case_ : Union[str, Any]=False , **snake_case_ : Tuple , ):
__a : List[str] = BertPreTokenizer()
return super().save_pretrained(snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
| 216
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 355
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowercase = logging.get_logger(__name__)
class __lowercase ( A ):
'''simple docstring'''
def __init__( self : List[str] , *_a : Any , **_a : str ):
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 35
| 0
|
'''simple docstring'''
def UpperCamelCase_ ( _UpperCAmelCase : list ) -> list:
"""simple docstring"""
_UpperCAmelCase : List[Any] = len(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
_UpperCAmelCase , _UpperCAmelCase : int = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = list(range(10, 0, -1))
print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
| 31
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Optional[int] = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
'''simple docstring'''
import os
from math import logaa
def __lowerCAmelCase ( UpperCamelCase__ = "base_exp.txt" ) -> int:
__lowerCamelCase = 0
__lowerCamelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(UpperCamelCase__ ) , UpperCamelCase__ ) ) ):
__lowerCamelCase , __lowerCamelCase = list(map(UpperCamelCase__ , line.split(''',''' ) ) )
if x * logaa(UpperCamelCase__ ) > largest:
__lowerCamelCase = x * logaa(UpperCamelCase__ )
__lowerCamelCase = i + 1
return result
if __name__ == "__main__":
print(solution())
| 237
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : Optional[Any] =LongformerTokenizer
lowerCamelCase : Optional[Any] =True
lowerCamelCase : List[str] =LongformerTokenizerFast
lowerCamelCase : Union[str, Any] =True
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(a , range(len(a ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(a ) )
def SCREAMING_SNAKE_CASE__ ( self : int , **a : int ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a )
def SCREAMING_SNAKE_CASE__ ( self : str , **a : Dict ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int ):
"""simple docstring"""
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowerCamelCase = tokenizer.tokenize(a ) # , add_prefix_space=True)
self.assertListEqual(a , a )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=a ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=a ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__lowerCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=a )
__lowerCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=a )
__lowerCamelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = '''Encode this sequence.'''
__lowerCamelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a , a )
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a , a )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a , a )
# Testing spaces after special tokens
__lowerCamelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(a , lstrip=a , rstrip=a )} ) # mask token has a left space
__lowerCamelCase = tokenizer.convert_tokens_to_ids(a )
__lowerCamelCase = '''Encode <mask> sequence'''
__lowerCamelCase = '''Encode <mask>sequence'''
__lowerCamelCase = tokenizer.encode(a )
__lowerCamelCase = encoded.index(a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a , a )
__lowerCamelCase = tokenizer.encode(a )
__lowerCamelCase = encoded.index(a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a , a )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase = self.tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase = '''A, <mask> AllenNLP sentence.'''
__lowerCamelCase = tokenizer_r.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
__lowerCamelCase = tokenizer_p.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__lowerCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__lowerCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , a )
self.assertEqual(post_processor_state['''add_prefix_space'''] , a )
self.assertEqual(post_processor_state['''trim_offsets'''] , a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__lowerCamelCase = f"""{text_of_1_token} {text_of_1_token}"""
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , )
__lowerCamelCase = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , )
| 237
| 1
|
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a ):
__lowerCAmelCase = str(id_ )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = []
__lowerCAmelCase = {} # {vertex:distance}
def __lt__( self , __a ):
return self.key < other.key
def __repr__( self ):
return self.id
def snake_case ( self , __a ):
self.neighbors.append(__a )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = weight
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = []
for u in graph:
__lowerCAmelCase = math.inf
__lowerCAmelCase = None
__lowerCAmelCase = 0
__lowerCAmelCase = graph[:]
while q:
__lowerCAmelCase = min(_UpperCamelCase )
q.remove(_UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__lowerCAmelCase = u
__lowerCAmelCase = u.edges[v.id]
for i in range(1 , len(_UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
for u in graph:
__lowerCAmelCase = math.inf
__lowerCAmelCase = None
__lowerCAmelCase = 0
__lowerCAmelCase = list(_UpperCamelCase )
hq.heapify(_UpperCamelCase )
while h:
__lowerCAmelCase = hq.heappop(_UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__lowerCAmelCase = u
__lowerCAmelCase = u.edges[v.id]
hq.heapify(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _lowerCamelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
|
'''simple docstring'''
from collections.abc import Sequence
def __UpperCamelCase ( lowercase__ : Sequence[float], lowercase__ : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(lowercase__ ) )
def __UpperCamelCase ( lowercase__ : Sequence[float], lowercase__ : float ):
'''simple docstring'''
__lowercase =0.0
for coeff in reversed(lowercase__ ):
__lowercase =result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 141
| 0
|
from __future__ import annotations
def A_ ( _lowerCAmelCase ) -> bool:
UpperCamelCase : str = str(_lowerCAmelCase )
return len(_lowerCAmelCase ) == 9 and set(_lowerCAmelCase ) == set("123456789" )
def A_ ( ) -> int | None:
for base_num in range(9999 , 4999 , -1 ):
UpperCamelCase : Tuple = 10_0002 * base_num
if is_9_pandigital(_lowerCAmelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
UpperCamelCase : Optional[Any] = 100_2003 * base_num
if is_9_pandigital(_lowerCAmelCase ):
return candidate
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 140
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A__ :
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=2 , ):
'''simple docstring'''
UpperCamelCase : List[str] = parent
UpperCamelCase : Tuple = batch_size
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Optional[int] = patch_size
UpperCamelCase : List[str] = num_channels
UpperCamelCase : Any = is_training
UpperCamelCase : Dict = use_labels
UpperCamelCase : List[str] = hidden_size
UpperCamelCase : Dict = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : str = intermediate_size
UpperCamelCase : Optional[int] = hidden_act
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : List[Any] = type_sequence_label_size
UpperCamelCase : List[str] = initializer_range
UpperCamelCase : Union[str, Any] = scope
UpperCamelCase : Union[str, Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCamelCase : Optional[Any] = (image_size // patch_size) ** 2
UpperCamelCase : int = num_patches + 2
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Tuple = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase( self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = TFDeiTModel(config=A_ )
UpperCamelCase : Tuple = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = TFDeiTForMaskedImageModeling(config=A_ )
UpperCamelCase : Optional[Any] = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase : Dict = 1
UpperCamelCase : Optional[Any] = TFDeiTForMaskedImageModeling(A_ )
UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Any = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.type_sequence_label_size
UpperCamelCase : List[Any] = TFDeiTForImageClassification(A_ )
UpperCamelCase : Optional[int] = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase : List[Any] = 1
UpperCamelCase : Optional[Any] = TFDeiTForImageClassification(A_ )
UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : List[Any] = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = config_and_inputs
UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCAmelCase :str = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_UpperCAmelCase :Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_UpperCAmelCase :Dict = False
_UpperCAmelCase :List[str] = False
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Optional[int] = False
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = TFDeiTModelTester(self )
UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[int] = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Dense ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : str = model_class(A_ )
UpperCamelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def __UpperCamelCase( self , A_ , A_ , A_=False ):
'''simple docstring'''
UpperCamelCase : List[str] = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = TFDeiTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A_ ( ) -> str:
UpperCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase( self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
UpperCamelCase : List[Any] = self.default_image_processor
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Union[str, Any] = image_processor(images=A_ , return_tensors="tf" )
# forward pass
UpperCamelCase : str = model(**A_ )
# verify the logits
UpperCamelCase : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase : Tuple = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
| 140
| 1
|
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
_lowerCamelCase : Any = 0.00
_lowerCamelCase : Union[str, Any] = 0
for resistor in resistors:
if resistor <= 0:
_lowerCamelCase : Any = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(A_ )
first_sum += 1 / float(A_ )
index += 1
return 1 / first_sum
def snake_case_ ( A_ : int ):
'''simple docstring'''
_lowerCamelCase : Dict = 0.00
_lowerCamelCase : Tuple = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_lowerCamelCase : Any = F'''Resistor at index {index} has a negative value!'''
raise ValueError(A_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
a__ : Tuple = logging.get_logger(__name__)
# General docstring
a__ : List[Any] = "RegNetConfig"
# Base docstring
a__ : Dict = "facebook/regnet-y-040"
a__ : Optional[int] = [1, 1_0_8_8, 7, 7]
# Image classification docstring
a__ : Union[str, Any] = "facebook/regnet-y-040"
a__ : Union[str, Any] = "tabby, tabby cat"
a__ : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase__ ( nn.Module):
def __init__( self :Union[str, Any] , _A :int , _A :int , _A :int = 3 , _A :int = 1 , _A :int = 1 , _A :Optional[str] = "relu" , ) -> int:
'''simple docstring'''
super().__init__()
__A = nn.Convad(
_A , _A , kernel_size=_A , stride=_A , padding=kernel_size // 2 , groups=_A , bias=_A , )
__A = nn.BatchNormad(_A )
__A = ACTaFN[activation] if activation is not None else nn.Identity()
def lowercase_ ( self :Tuple , _A :Union[str, Any] ) -> int:
'''simple docstring'''
__A = self.convolution(_A )
__A = self.normalization(_A )
__A = self.activation(_A )
return hidden_state
class UpperCamelCase__ ( nn.Module):
def __init__( self :Optional[int] , _A :RegNetConfig ) -> List[str]:
'''simple docstring'''
super().__init__()
__A = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__A = config.num_channels
def lowercase_ ( self :Any , _A :Optional[int] ) -> Optional[int]:
'''simple docstring'''
__A = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
__A = self.embedder(_A )
return hidden_state
class UpperCamelCase__ ( nn.Module):
def __init__( self :Optional[int] , _A :int , _A :int , _A :int = 2 ) -> Any:
'''simple docstring'''
super().__init__()
__A = nn.Convad(_A , _A , kernel_size=1 , stride=_A , bias=_A )
__A = nn.BatchNormad(_A )
def lowercase_ ( self :Optional[int] , _A :Tensor ) -> Tensor:
'''simple docstring'''
__A = self.convolution(_A )
__A = self.normalization(_A )
return hidden_state
class UpperCamelCase__ ( nn.Module):
def __init__( self :Optional[Any] , _A :int , _A :int ) -> List[str]:
'''simple docstring'''
super().__init__()
__A = nn.AdaptiveAvgPoolad((1, 1) )
__A = nn.Sequential(
nn.Convad(_A , _A , kernel_size=1 ) , nn.ReLU() , nn.Convad(_A , _A , kernel_size=1 ) , nn.Sigmoid() , )
def lowercase_ ( self :Any , _A :str ) -> int:
'''simple docstring'''
__A = self.pooler(_A )
__A = self.attention(_A )
__A = hidden_state * attention
return hidden_state
class UpperCamelCase__ ( nn.Module):
def __init__( self :int , _A :RegNetConfig , _A :int , _A :int , _A :int = 1 ) -> List[Any]:
'''simple docstring'''
super().__init__()
__A = in_channels != out_channels or stride != 1
__A = max(1 , out_channels // config.groups_width )
__A = (
RegNetShortCut(_A , _A , stride=_A ) if should_apply_shortcut else nn.Identity()
)
__A = nn.Sequential(
RegNetConvLayer(_A , _A , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , stride=_A , groups=_A , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , kernel_size=1 , activation=_A ) , )
__A = ACTaFN[config.hidden_act]
def lowercase_ ( self :Optional[Any] , _A :int ) -> int:
'''simple docstring'''
__A = hidden_state
__A = self.layer(_A )
__A = self.shortcut(_A )
hidden_state += residual
__A = self.activation(_A )
return hidden_state
class UpperCamelCase__ ( nn.Module):
def __init__( self :Optional[int] , _A :RegNetConfig , _A :int , _A :int , _A :int = 1 ) -> Any:
'''simple docstring'''
super().__init__()
__A = in_channels != out_channels or stride != 1
__A = max(1 , out_channels // config.groups_width )
__A = (
RegNetShortCut(_A , _A , stride=_A ) if should_apply_shortcut else nn.Identity()
)
__A = nn.Sequential(
RegNetConvLayer(_A , _A , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_A , _A , stride=_A , groups=_A , activation=config.hidden_act ) , RegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_A , _A , kernel_size=1 , activation=_A ) , )
__A = ACTaFN[config.hidden_act]
def lowercase_ ( self :int , _A :int ) -> int:
'''simple docstring'''
__A = hidden_state
__A = self.layer(_A )
__A = self.shortcut(_A )
hidden_state += residual
__A = self.activation(_A )
return hidden_state
class UpperCamelCase__ ( nn.Module):
def __init__( self :Tuple , _A :RegNetConfig , _A :int , _A :int , _A :int = 2 , _A :int = 2 , ) -> Any:
'''simple docstring'''
super().__init__()
__A = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
__A = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_A , _A , _A , stride=_A , ) , *[layer(_A , _A , _A ) for _ in range(depth - 1 )] , )
def lowercase_ ( self :List[str] , _A :Optional[int] ) -> Tuple:
'''simple docstring'''
__A = self.layers(_A )
return hidden_state
class UpperCamelCase__ ( nn.Module):
def __init__( self :Union[str, Any] , _A :RegNetConfig ) -> List[str]:
'''simple docstring'''
super().__init__()
__A = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__A = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_A , config.depths[1:] ):
self.stages.append(RegNetStage(_A , _A , _A , depth=_A ) )
def lowercase_ ( self :str , _A :Tensor , _A :bool = False , _A :bool = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
__A = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__A = hidden_states + (hidden_state,)
__A = stage_module(_A )
if output_hidden_states:
__A = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
UpperCAmelCase__ : int = RegNetConfig
UpperCAmelCase__ : Dict = 'regnet'
UpperCAmelCase__ : int = 'pixel_values'
UpperCAmelCase__ : Optional[int] = True
def lowercase_ ( self :str , _A :Optional[int] ) -> Tuple:
'''simple docstring'''
if isinstance(_A , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(_A , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowercase_ ( self :int , _A :str , _A :Dict=False ) -> Dict:
'''simple docstring'''
if isinstance(_A , _A ):
__A = value
a__ : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
a__ : int = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :List[str] , _A :List[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(_A )
__A = config
__A = RegNetEmbeddings(_A )
__A = RegNetEncoder(_A )
__A = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase_ ( self :List[Any] , _A :Tensor , _A :Optional[bool] = None , _A :Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
__A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A = return_dict if return_dict is not None else self.config.use_return_dict
__A = self.embedder(_A )
__A = self.encoder(
_A , output_hidden_states=_A , return_dict=_A )
__A = encoder_outputs[0]
__A = self.pooler(_A )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_A , pooler_output=_A , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Optional[int] , _A :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_A )
__A = config.num_labels
__A = RegNetModel(_A )
# classification head
__A = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase_ ( self :Optional[int] , _A :Optional[torch.FloatTensor] = None , _A :Optional[torch.LongTensor] = None , _A :Optional[bool] = None , _A :Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
__A = return_dict if return_dict is not None else self.config.use_return_dict
__A = self.regnet(_A , output_hidden_states=_A , return_dict=_A )
__A = outputs.pooler_output if return_dict else outputs[1]
__A = self.classifier(_A )
__A = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__A = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__A = 'single_label_classification'
else:
__A = 'multi_label_classification'
if self.config.problem_type == "regression":
__A = MSELoss()
if self.num_labels == 1:
__A = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__A = loss_fct(_A , _A )
elif self.config.problem_type == "single_label_classification":
__A = CrossEntropyLoss()
__A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__A = BCEWithLogitsLoss()
__A = loss_fct(_A , _A )
if not return_dict:
__A = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
| 161
| 0
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCamelCase__ ( snake_case_ : int ) -> str:
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError('''Undefined for non-integers''' )
elif precision < 1:
raise ValueError('''Undefined for non-natural numbers''' )
__snake_case = precision
__snake_case = ceil(precision / 14 )
__snake_case = 42_6880 * Decimal(1_0005 ).sqrt()
__snake_case = 1
__snake_case = 1359_1409
__snake_case = Decimal(snake_case_ )
for k in range(1 , snake_case_ ):
__snake_case = factorial(6 * k ) // (factorial(3 * k ) * factorial(snake_case_ ) ** 3)
linear_term += 5_4514_0134
exponential_term *= -26_2537_4126_4076_8000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
snake_case_ = 50
print(F'The first {n} digits of pi is: {pi(n)}')
| 238
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Union[str, Any] = (DPMSolverSinglestepScheduler,)
A_ : Union[str, Any] = (('num_inference_steps', 25),)
def a (self : Dict , **a__ : Tuple ):
"""simple docstring"""
__snake_case = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**a__ )
return config
def a (self : str , a__ : Any=0 , **a__ : Tuple ):
"""simple docstring"""
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('''num_inference_steps''' , a__ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
__snake_case = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case , __snake_case = sample, sample
for t in range(a__ , time_step + scheduler.config.solver_order + 1 ):
__snake_case = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
__snake_case = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
def a (self : List[Any] , a__ : Dict=0 , **a__ : List[str] ):
"""simple docstring"""
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('''num_inference_steps''' , a__ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
__snake_case = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
__snake_case = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a (self : int , a__ : Tuple=None , **a__ : List[str] ):
"""simple docstring"""
if scheduler is None:
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
return sample
def a (self : str ):
"""simple docstring"""
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = 50
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1E-3
def a (self : int ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = self.full_loop(scheduler=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
__snake_case = DEISMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverMultistepScheduler.from_config(scheduler.config )
__snake_case = UniPCMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__snake_case = self.full_loop(scheduler=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def a (self : List[str] ):
"""simple docstring"""
self.check_over_configs(thresholding=a__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=a__ , prediction_type=a__ , sample_max_value=a__ , algorithm_type='''dpmsolver++''' , solver_order=a__ , solver_type=a__ , )
def a (self : Union[str, Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
__snake_case = self.full_loop(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
assert not torch.isnan(a__ ).any(), "Samples have nan numbers"
def a (self : List[str] ):
"""simple docstring"""
self.check_over_configs(lower_order_final=a__ )
self.check_over_configs(lower_order_final=a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def a (self : Tuple ):
"""simple docstring"""
self.check_over_configs(variance_type=a__ )
self.check_over_configs(variance_type='''learned_range''' )
def a (self : int ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=a__ , time_step=0 )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.full_loop()
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def a (self : int ):
"""simple docstring"""
__snake_case = self.full_loop(use_karras_sigmas=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1E-3
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.full_loop(prediction_type='''v_prediction''' )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1E-3
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1E-3
def a (self : int ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(thresholding=a__ , dynamic_thresholding_ratio=0 )
__snake_case = scheduler_class(**a__ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter.half()
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
assert sample.dtype == torch.floataa
| 238
| 1
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Initialise PyTorch model
__a = AlbertConfig.from_json_file(_UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
__a = AlbertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__snake_case :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 49
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ):
snake_case__ : List[Any] = parent
snake_case__ : List[Any] = batch_size
snake_case__ : int = image_size
snake_case__ : List[Any] = num_channels
snake_case__ : Optional[Any] = embeddings_size
snake_case__ : Optional[int] = hidden_sizes
snake_case__ : Tuple = depths
snake_case__ : Any = is_training
snake_case__ : Optional[int] = use_labels
snake_case__ : Optional[int] = hidden_act
snake_case__ : Optional[int] = num_labels
snake_case__ : int = scope
snake_case__ : Tuple = len(snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : int ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ):
snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ )
snake_case__ : int = model(snake_case_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ):
snake_case__ : str = self.num_labels
snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ )
snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self : Tuple ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs
snake_case__ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowercase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Tuple = TFResNetModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowerCamelCase ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : str ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def lowerCamelCase ( self : int ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def lowerCamelCase ( self : List[Any] ):
pass
def lowerCamelCase ( self : List[Any] ):
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Dict = model_class(snake_case_ )
snake_case__ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Union[str, Any] = [*signature.parameters.keys()]
snake_case__ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase ( self : List[str] ):
def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ):
snake_case__ : List[Any] = model_class(snake_case_ )
snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ : Dict = layer_type
snake_case__ : Optional[int] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[Any] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def lowerCamelCase ( self : Optional[Any] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def __snake_case( ) -> Optional[int]:
snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase ( self : List[Any] ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : List[Any] = self.default_image_processor
snake_case__ : List[Any] = prepare_img()
snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[Any] = model(**snake_case_ )
# verify the logits
snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
| 35
| 0
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = '▁'
lowerCamelCase__ = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
lowerCamelCase__ = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
lowerCamelCase__ = {
'facebook/s2t-small-librispeech-asr': 1_024,
}
lowerCamelCase__ = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
lowerCamelCase__ = {'mustc': MUSTC_LANGS}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : int = MAX_MODEL_INPUT_SIZES
lowerCAmelCase : Any = ["input_ids", "attention_mask"]
lowerCAmelCase : List[int] = []
def __init__( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="</s>" , lowerCamelCase__ : Tuple="<pad>" , lowerCamelCase__ : List[Any]="<unk>" , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : int=None , lowerCamelCase__ : str=None , lowerCamelCase__ : Optional[Dict[str, Any]] = None , **lowerCamelCase__ : str , ) ->None:
'''simple docstring'''
_UpperCAmelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , do_upper_case=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , lang_codes=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
_UpperCAmelCase : Union[str, Any] = do_upper_case
_UpperCAmelCase : int = do_lower_case
_UpperCAmelCase : Union[str, Any] = load_json(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase : List[Any] = spm_file
_UpperCAmelCase : int = load_spm(lowerCamelCase__ , self.sp_model_kwargs )
if lang_codes is not None:
_UpperCAmelCase : List[str] = lang_codes
_UpperCAmelCase : List[Any] = LANGUAGES[lang_codes]
_UpperCAmelCase : int = [F"""<lang:{lang}>""" for lang in self.langs]
_UpperCAmelCase : List[Any] = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
_UpperCAmelCase : List[Any] = self.lang_tokens
_UpperCAmelCase : List[Any] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_UpperCAmelCase : List[Any] = {}
@property
def lowerCAmelCase__ ( self : List[str] ) ->int:
'''simple docstring'''
return len(self.encoder )
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
'''simple docstring'''
return self._tgt_lang
@tgt_lang.setter
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = new_tgt_lang
self.set_tgt_lang_special_tokens(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.lang_code_to_id[tgt_lang]
_UpperCAmelCase : int = [lang_code_id]
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str ) ->List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : str ) ->Tuple:
'''simple docstring'''
return self.encoder.get(lowerCamelCase__ , self.encoder[self.unk_token] )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : int ) ->str:
'''simple docstring'''
return self.decoder.get(lowerCamelCase__ , self.unk_token )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Any = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_UpperCAmelCase : Dict = self.sp_model.decode(lowerCamelCase__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_UpperCAmelCase : int = []
else:
current_sub_tokens.append(lowerCamelCase__ )
_UpperCAmelCase : List[str] = self.sp_model.decode(lowerCamelCase__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int=None ) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
_UpperCAmelCase : Tuple = [1] * len(self.prefix_tokens )
_UpperCAmelCase : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones
def lowerCAmelCase__ ( self : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : int = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.__dict__.copy()
_UpperCAmelCase : Any = None
return state
def __setstate__( self : Optional[Any] , lowerCamelCase__ : Dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase : List[Any] = {}
_UpperCAmelCase : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = Path(lowerCamelCase__ )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
_UpperCAmelCase : Union[str, Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_UpperCAmelCase : List[Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , lowerCamelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowerCamelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(lowerCamelCase__ , "wb" ) as fi:
_UpperCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (str(lowerCamelCase__ ), str(lowerCamelCase__ ))
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Tuple = sentencepiece.SentencePieceProcessor(**__lowerCAmelCase )
spm.Load(str(__lowerCAmelCase ) )
return spm
def __lowerCAmelCase (__lowerCAmelCase ):
with open(__lowerCAmelCase , "r" ) as f:
return json.load(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
with open(__lowerCAmelCase , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase , indent=2 )
| 322
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
lowerCamelCase__ = list[list[float | int]]
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : float
for row in range(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = matrix[row][col]
_UpperCAmelCase : Optional[int] = vector[row][0]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = 0
while row < size and col < size:
# pivoting
_UpperCAmelCase : Optional[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCAmelCase , __lowerCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_UpperCAmelCase , _UpperCAmelCase : str = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_UpperCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __lowerCAmelCase ):
for row in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[row][col] / augmented[col][col]
for cola in range(__lowerCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__lowerCAmelCase )
]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix = [[0] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
for x_val, y_val in enumerate(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = (x_val + 1) ** (size - col - 1)
_UpperCAmelCase : int = y_val
_UpperCAmelCase : List[str] = solve(__lowerCAmelCase , __lowerCAmelCase )
def interpolated_func(__lowerCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__lowerCAmelCase ) )
return interpolated_func
def __lowerCAmelCase (__lowerCAmelCase ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __lowerCAmelCase (__lowerCAmelCase = question_function , __lowerCAmelCase = 10 ):
_UpperCAmelCase : list[int] = [func(__lowerCAmelCase ) for x_val in range(1 , order + 1 )]
_UpperCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Callable[[int], int]
_UpperCAmelCase : int
for poly in polynomials:
_UpperCAmelCase : int = 1
while func(__lowerCAmelCase ) == poly(__lowerCAmelCase ):
x_val += 1
ret += poly(__lowerCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 322
| 1
|
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , lowercase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] )-> str:
super().__init__()
A__ = nn.ModuleList(lowercase_ )
def UpperCAmelCase_ ( self :Dict , lowercase_ :torch.FloatTensor , lowercase_ :Union[torch.Tensor, float, int] , lowercase_ :torch.Tensor , lowercase_ :List[torch.tensor] , lowercase_ :List[float] , lowercase_ :Optional[torch.Tensor] = None , lowercase_ :Optional[torch.Tensor] = None , lowercase_ :Optional[torch.Tensor] = None , lowercase_ :Optional[Dict[str, Any]] = None , lowercase_ :bool = False , lowercase_ :bool = True , )-> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(lowercase_ , lowercase_ , self.nets ) ):
A__, A__ = controlnet(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
# merge samples
if i == 0:
A__, A__ = down_samples, mid_sample
else:
A__ = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase_ , lowercase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCAmelCase_ ( self :Any , lowercase_ :Union[str, os.PathLike] , lowercase_ :bool = True , lowercase_ :Callable = None , lowercase_ :bool = False , lowercase_ :Optional[str] = None , )-> List[Any]:
A__ = 0
A__ = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase_ , is_main_process=lowercase_ , save_function=lowercase_ , safe_serialization=lowercase_ , variant=lowercase_ , )
idx += 1
A__ = model_path_to_save + F"_{idx}"
@classmethod
def UpperCAmelCase_ ( cls :Any , lowercase_ :Optional[Union[str, os.PathLike]] , **lowercase_ :Dict )-> Union[str, Any]:
A__ = 0
A__ = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
A__ = pretrained_model_path
while os.path.isdir(lowercase_ ):
A__ = ControlNetModel.from_pretrained(lowercase_ , **lowercase_ )
controlnets.append(lowercase_ )
idx += 1
A__ = pretrained_model_path + F"_{idx}"
logger.info(F"{len(lowercase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowercase_ ) == 0:
raise ValueError(
F"No ControlNets found under {os.path.dirname(lowercase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowercase_ )
| 237
|
'''simple docstring'''
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 237
| 1
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def snake_case_ ( A_ : str, A_ : Dict, A_ : List[str] ):
'''simple docstring'''
_lowerCamelCase : Any = state_dict.pop(A_ )
_lowerCamelCase : Union[str, Any] = val
def snake_case_ ( A_ : Tuple ):
'''simple docstring'''
_lowerCamelCase : List[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_lowerCamelCase : Union[str, Any] = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''' )
_lowerCamelCase : Union[str, Any] = value
else:
_lowerCamelCase : Tuple = value
return new_state_dict
def snake_case_ ( A_ : Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCamelCase : int = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
_lowerCamelCase : Union[str, Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Union[str, Any] = in_proj_weight[:2_56, :]
_lowerCamelCase : int = in_proj_bias[:2_56]
_lowerCamelCase : Any = in_proj_weight[2_56:5_12, :]
_lowerCamelCase : Union[str, Any] = in_proj_bias[2_56:5_12]
_lowerCamelCase : Optional[Any] = in_proj_weight[-2_56:, :]
_lowerCamelCase : Optional[Any] = in_proj_bias[-2_56:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowerCamelCase : List[str] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_lowerCamelCase : int = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[:2_56, :]
_lowerCamelCase : Tuple = in_proj_bias[:2_56]
_lowerCamelCase : List[Any] = in_proj_weight[2_56:5_12, :]
_lowerCamelCase : Dict = in_proj_bias[2_56:5_12]
_lowerCamelCase : Union[str, Any] = in_proj_weight[-2_56:, :]
_lowerCamelCase : List[str] = in_proj_bias[-2_56:]
# read in weights + bias of input projection layer of cross-attention
_lowerCamelCase : Optional[Any] = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
_lowerCamelCase : List[str] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowerCamelCase : Tuple = in_proj_weight_cross_attn[:2_56, :]
_lowerCamelCase : Optional[int] = in_proj_bias_cross_attn[:2_56]
_lowerCamelCase : Union[str, Any] = in_proj_weight_cross_attn[2_56:5_12, :]
_lowerCamelCase : Union[str, Any] = in_proj_bias_cross_attn[2_56:5_12]
_lowerCamelCase : List[str] = in_proj_weight_cross_attn[-2_56:, :]
_lowerCamelCase : Tuple = in_proj_bias_cross_attn[-2_56:]
def snake_case_ ( A_ : Dict, A_ : List[str] ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : Any = image.size
_lowerCamelCase : Any = max(A_, A_ )
_lowerCamelCase : str = 8_00 if '''detection''' in checkpoint_url else 10_00
_lowerCamelCase : str = target_max_size / current_max_size
_lowerCamelCase : List[Any] = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def snake_case_ ( A_ : int ):
'''simple docstring'''
_lowerCamelCase : Any = F.to_tensor(A_ )
_lowerCamelCase : Optional[Any] = F.normalize(A_, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def snake_case_ ( A_ : List[str], A_ : int, A_ : str ):
'''simple docstring'''
logger.info('''Converting model...''' )
# load original state dict
_lowerCamelCase : Optional[int] = torch.hub.load_state_dict_from_url(A_, map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(A_, A_, A_ )
_lowerCamelCase : int = rename_backbone_keys(A_ )
# query, key and value matrices need special treatment
read_in_q_k_v(A_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCamelCase : Dict = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
_lowerCamelCase : Any = state_dict.pop(A_ )
_lowerCamelCase : int = val
# create HuggingFace model and load state dict
_lowerCamelCase : Union[str, Any] = TableTransformerConfig(
backbone='''resnet18''', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
if "detection" in checkpoint_url:
_lowerCamelCase : str = 15
_lowerCamelCase : Dict = 2
_lowerCamelCase : List[Any] = {0: '''table''', 1: '''table rotated'''}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
else:
_lowerCamelCase : Union[str, Any] = 1_25
_lowerCamelCase : Tuple = 6
_lowerCamelCase : str = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
_lowerCamelCase : int = idalabel
_lowerCamelCase : Tuple = {v: k for k, v in idalabel.items()}
_lowerCamelCase : str = DetrImageProcessor(
format='''coco_detection''', max_size=8_00 if '''detection''' in checkpoint_url else 10_00 )
_lowerCamelCase : List[Any] = TableTransformerForObjectDetection(A_ )
model.load_state_dict(A_ )
model.eval()
# verify our conversion
_lowerCamelCase : Tuple = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
_lowerCamelCase : List[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''', repo_type='''dataset''', filename=A_ )
_lowerCamelCase : Optional[Any] = Image.open(A_ ).convert('''RGB''' )
_lowerCamelCase : Union[str, Any] = normalize(resize(A_, A_ ) ).unsqueeze(0 )
_lowerCamelCase : str = model(A_ )
if "detection" in checkpoint_url:
_lowerCamelCase : int = (1, 15, 3)
_lowerCamelCase : str = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
_lowerCamelCase : List[str] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
_lowerCamelCase : Dict = (1, 1_25, 7)
_lowerCamelCase : str = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
_lowerCamelCase : int = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3], A_, atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3], A_, atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(A_ ).mkdir(exist_ok=A_ )
model.save_pretrained(A_ )
image_processor.save_pretrained(A_ )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
_lowerCamelCase : Dict = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(A_ )
image_processor.push_to_hub(A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 175
|
"""simple docstring"""
import argparse
lowerCAmelCase__ = '''docs/source/_static/js/custom.js'''
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
with open(A_, encoding='''utf-8''', newline='''\n''' ) as f:
_lowerCamelCase : int = f.readlines()
_lowerCamelCase : List[str] = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
_lowerCamelCase : List[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(A_, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.writelines(A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
lowerCAmelCase__ = parser.parse_args()
update_custom_js(args.version)
| 175
| 1
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCamelCase ( __lowercase : int = 3 ):
'''simple docstring'''
if isinstance(__lowercase ,__lowercase ):
raise TypeError('number of qubits must be a integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(__lowercase ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
raise ValueError('number of qubits too large to simulate(>10).' )
A_ : Any = QuantumRegister(__lowercase ,'qr' )
A_ : Tuple = ClassicalRegister(__lowercase ,'cr' )
A_ : Optional[int] = QuantumCircuit(__lowercase ,__lowercase )
A_ : int = number_of_qubits
for i in range(__lowercase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(__lowercase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) ,__lowercase ,__lowercase )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(__lowercase ,number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(__lowercase ,__lowercase )
# simulate with 10000 shots
A_ : Tuple = Aer.get_backend('qasm_simulator' )
A_ : Dict = execute(__lowercase ,__lowercase ,shots=1_00_00 )
return job.result().get_counts(__lowercase )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 140
|
from string import ascii_lowercase, ascii_uppercase
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
if not sentence:
return ""
A_ : List[str] = dict(zip(__lowercase ,__lowercase ) )
return lower_to_upper.get(sentence[0] ,sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 140
| 1
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def _UpperCamelCase (a__ :Optional[Any] , a__ :str=False ):
"""simple docstring"""
UpperCamelCase__ = OmegaConf.load(a__ )
if display:
print(yaml.dump(OmegaConf.to_container(a__ ) ) )
return config
def _UpperCamelCase (a__ :int , a__ :Optional[Any]=None , a__ :str=None ):
"""simple docstring"""
if conf_path is None:
UpperCamelCase__ = """./model_checkpoints/vqgan_only.yaml"""
UpperCamelCase__ = load_config(a__ , display=a__ )
UpperCamelCase__ = VQModel(**config.model.params )
if ckpt_path is None:
UpperCamelCase__ = """./model_checkpoints/vqgan_only.pt"""
UpperCamelCase__ = torch.load(a__ , map_location=a__ )
if ".ckpt" in ckpt_path:
UpperCamelCase__ = sd["""state_dict"""]
model.load_state_dict(a__ , strict=a__ )
model.to(a__ )
del sd
return model
def _UpperCamelCase (a__ :Optional[Any] , a__ :List[Any] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = model.encode(a__ )
print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
UpperCamelCase__ = model.decode(a__ )
return xrec
def _UpperCamelCase (a__ :Dict , a__ :List[Any]=False ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = string.rsplit(""".""" , 1 )
if reload:
UpperCamelCase__ = importlib.import_module(a__ )
importlib.reload(a__ )
return getattr(importlib.import_module(a__ , package=a__ ) , cls )
def _UpperCamelCase (a__ :Optional[int] ):
"""simple docstring"""
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def _UpperCamelCase (a__ :Union[str, Any] , a__ :Dict , a__ :Dict=True , a__ :Optional[int]=True ):
"""simple docstring"""
UpperCamelCase__ = instantiate_from_config(a__ )
if sd is not None:
model.load_state_dict(a__ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def _UpperCamelCase (a__ :int , a__ :Any , a__ :Optional[int] , a__ :str ):
"""simple docstring"""
if ckpt:
UpperCamelCase__ = torch.load(a__ , map_location="""cpu""" )
UpperCamelCase__ = pl_sd["""global_step"""]
print(f"""loaded model from global step {global_step}.""" )
else:
UpperCamelCase__ = {"""state_dict""": None}
UpperCamelCase__ = None
UpperCamelCase__ = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=a__ , eval_mode=a__ )["""model"""]
return model, global_step
| 364
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
UpperCamelCase__ = random.Random()
def _UpperCamelCase (a__ :Any , a__ :Union[str, Any]=1.0 , a__ :Tuple=None , a__ :str=None ):
"""simple docstring"""
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=400 , __lowerCAmelCase=2000 , __lowerCAmelCase=10 , __lowerCAmelCase=160 , __lowerCAmelCase=8 , __lowerCAmelCase=0.0 , __lowerCAmelCase=4000 , __lowerCAmelCase=False , __lowerCAmelCase=True , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
UpperCamelCase__ = feature_size
UpperCamelCase__ = chunk_length
UpperCamelCase__ = hop_length
def _lowerCamelCase ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowerCamelCase ( self , __lowerCAmelCase=False , __lowerCAmelCase=False ):
def _flatten(__lowerCAmelCase ):
return list(itertools.chain(*__lowerCAmelCase ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(__lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : int = WhisperFeatureExtractor if is_speech_available() else None
def _lowerCamelCase ( self ):
UpperCamelCase__ = WhisperFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = feat_extract_first.save_pretrained(__lowerCAmelCase )[0]
check_json_file_has_correct_format(__lowerCAmelCase )
UpperCamelCase__ = self.feature_extraction_class.from_pretrained(__lowerCAmelCase )
UpperCamelCase__ = feat_extract_first.to_dict()
UpperCamelCase__ = feat_extract_second.to_dict()
UpperCamelCase__ = feat_extract_first.mel_filters
UpperCamelCase__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = os.path.join(__lowerCAmelCase , """feat_extract.json""" )
feat_extract_first.to_json_file(__lowerCAmelCase )
UpperCamelCase__ = self.feature_extraction_class.from_json_file(__lowerCAmelCase )
UpperCamelCase__ = feat_extract_first.to_dict()
UpperCamelCase__ = feat_extract_second.to_dict()
UpperCamelCase__ = feat_extract_first.mel_filters
UpperCamelCase__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase__ = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(__lowerCAmelCase , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(__lowerCAmelCase )
UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
# Test truncation required
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
UpperCamelCase__ = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs]
UpperCamelCase__ = [x[: feature_extractor.n_samples] for x in speech_inputs]
UpperCamelCase__ = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs_truncated]
UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def _lowerCamelCase ( self ):
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100 , 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("""id""" ).select(range(__lowerCAmelCase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
# fmt: off
UpperCamelCase__ = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = WhisperFeatureExtractor()
UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __lowerCAmelCase , atol=1E-4 ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = self._load_datasamples(1 )[0]
UpperCamelCase__ = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
UpperCamelCase__ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__lowerCAmelCase )[0]
self.assertTrue(np.all(np.mean(__lowerCAmelCase ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(__lowerCAmelCase ) - 1 ) < 1E-3 ) )
| 87
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any], lowerCamelCase : Optional[int], lowerCamelCase : Optional[Any]=13, lowerCamelCase : Optional[int]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=True, lowerCamelCase : str=False, lowerCamelCase : int=True, lowerCamelCase : Optional[Any]=99, lowerCamelCase : Union[str, Any]=32, lowerCamelCase : Optional[int]=5, lowerCamelCase : List[Any]=4, lowerCamelCase : List[Any]=37, lowerCamelCase : Any="gelu", lowerCamelCase : Any=0.1, lowerCamelCase : List[Any]=0.1, lowerCamelCase : Union[str, Any]=512, lowerCamelCase : Optional[int]=16, lowerCamelCase : Optional[int]=2, lowerCamelCase : Optional[int]=0.02, lowerCamelCase : List[str]=3, lowerCamelCase : str=4, lowerCamelCase : Any=None, )-> Optional[Any]:
lowerCamelCase__ : Optional[Any] =parent
lowerCamelCase__ : Dict =batch_size
lowerCamelCase__ : int =seq_length
lowerCamelCase__ : str =is_training
lowerCamelCase__ : str =use_input_mask
lowerCamelCase__ : List[Any] =use_token_type_ids
lowerCamelCase__ : Union[str, Any] =use_labels
lowerCamelCase__ : Union[str, Any] =vocab_size
lowerCamelCase__ : str =hidden_size
lowerCamelCase__ : Optional[int] =num_hidden_layers
lowerCamelCase__ : Tuple =num_attention_heads
lowerCamelCase__ : Dict =intermediate_size
lowerCamelCase__ : Dict =hidden_act
lowerCamelCase__ : Optional[int] =hidden_dropout_prob
lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
lowerCamelCase__ : List[Any] =max_position_embeddings
lowerCamelCase__ : str =type_vocab_size
lowerCamelCase__ : List[str] =type_sequence_label_size
lowerCamelCase__ : int =initializer_range
lowerCamelCase__ : Union[str, Any] =num_labels
lowerCamelCase__ : str =num_choices
lowerCamelCase__ : List[str] =scope
def snake_case ( self : Optional[Any] )-> Optional[int]:
lowerCamelCase__ : str =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Union[str, Any] =None
if self.use_input_mask:
lowerCamelCase__ : str =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : str =None
if self.use_token_type_ids:
lowerCamelCase__ : str =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Dict =None
lowerCamelCase__ : Optional[int] =None
lowerCamelCase__ : Any =None
if self.use_labels:
lowerCamelCase__ : List[Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[str] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self : Optional[int] )-> int:
return OpenLlamaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, use_stable_embedding=lowerCamelCase, )
def snake_case ( self : Union[str, Any], lowerCamelCase : Optional[int], lowerCamelCase : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Optional[int], lowerCamelCase : Optional[Any], lowerCamelCase : Any, lowerCamelCase : List[Any] )-> Optional[Any]:
lowerCamelCase__ : List[str] =OpenLlamaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCamelCase__ : List[Any] =model(lowerCamelCase, attention_mask=lowerCamelCase )
lowerCamelCase__ : List[str] =model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[Any], lowerCamelCase : Dict, lowerCamelCase : List[str], )-> str:
lowerCamelCase__ : Tuple =True
lowerCamelCase__ : List[Any] =OpenLlamaModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCamelCase__ : Any =model(
lowerCamelCase, attention_mask=lowerCamelCase, encoder_hidden_states=lowerCamelCase, encoder_attention_mask=lowerCamelCase, )
lowerCamelCase__ : Union[str, Any] =model(
lowerCamelCase, attention_mask=lowerCamelCase, encoder_hidden_states=lowerCamelCase, )
lowerCamelCase__ : Any =model(lowerCamelCase, attention_mask=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : Union[str, Any], )-> Optional[Any]:
lowerCamelCase__ : Optional[int] =OpenLlamaForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCamelCase__ : str =model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : List[str], lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str], )-> Any:
lowerCamelCase__ : Tuple =True
lowerCamelCase__ : Any =True
lowerCamelCase__ : Dict =OpenLlamaForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# first forward pass
lowerCamelCase__ : Union[str, Any] =model(
lowerCamelCase, attention_mask=lowerCamelCase, encoder_hidden_states=lowerCamelCase, encoder_attention_mask=lowerCamelCase, use_cache=lowerCamelCase, )
lowerCamelCase__ : int =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : Optional[Any] =ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : List[Any] =torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : List[Any] =torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : Dict =model(
lowerCamelCase, attention_mask=lowerCamelCase, encoder_hidden_states=lowerCamelCase, encoder_attention_mask=lowerCamelCase, output_hidden_states=lowerCamelCase, )['''hidden_states'''][0]
lowerCamelCase__ : Optional[int] =model(
lowerCamelCase, attention_mask=lowerCamelCase, encoder_hidden_states=lowerCamelCase, encoder_attention_mask=lowerCamelCase, past_key_values=lowerCamelCase, output_hidden_states=lowerCamelCase, )['''hidden_states'''][0]
# select random slice
lowerCamelCase__ : List[str] =ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] =output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : int =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
def snake_case ( self : Optional[Any] )-> Any:
lowerCamelCase__ : str =self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : List[Any] =config_and_inputs
lowerCamelCase__ : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_a = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_a = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = False
_a = False
def snake_case ( self : Dict )-> List[Any]:
lowerCamelCase__ : List[Any] =OpenLlamaModelTester(self )
lowerCamelCase__ : List[Any] =ConfigTester(self, config_class=lowerCamelCase, hidden_size=37 )
def snake_case ( self : Optional[int] )-> Optional[Any]:
self.config_tester.run_common_tests()
def snake_case ( self : Tuple )-> Union[str, Any]:
lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def snake_case ( self : int )-> Optional[Any]:
lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ : Dict =type
self.model_tester.create_and_check_model(*lowerCamelCase )
def snake_case ( self : Union[str, Any] )-> Optional[int]:
lowerCamelCase__ , lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple =3
lowerCamelCase__ : Dict =input_dict['''input_ids''']
lowerCamelCase__ : Dict =input_ids.ne(1 ).to(lowerCamelCase )
lowerCamelCase__ : List[str] =ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
lowerCamelCase__ : List[str] =OpenLlamaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self : Optional[int] )-> int:
lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Any =3
lowerCamelCase__ : List[Any] ='''single_label_classification'''
lowerCamelCase__ : Any =input_dict['''input_ids''']
lowerCamelCase__ : Optional[Any] =input_ids.ne(1 ).to(lowerCamelCase )
lowerCamelCase__ : Tuple =ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] =OpenLlamaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCamelCase__ : str =model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self : List[str] )-> List[str]:
lowerCamelCase__ , lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[int] =3
lowerCamelCase__ : Optional[int] ='''multi_label_classification'''
lowerCamelCase__ : Optional[Any] =input_dict['''input_ids''']
lowerCamelCase__ : str =input_ids.ne(1 ).to(lowerCamelCase )
lowerCamelCase__ : List[str] =ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase__ : str =OpenLlamaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def snake_case ( self : Optional[Any] )-> List[str]:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def snake_case ( self : Union[str, Any], lowerCamelCase : Optional[int] )-> str:
lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : List[Any] =ids_tensor([1, 10], config.vocab_size )
lowerCamelCase__ : str =ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Union[str, Any] =OpenLlamaModel(lowerCamelCase )
original_model.to(lowerCamelCase )
original_model.eval()
lowerCamelCase__ : List[str] =original_model(lowerCamelCase ).last_hidden_state
lowerCamelCase__ : str =original_model(lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Tuple ={'''type''': scaling_type, '''factor''': 10.0}
lowerCamelCase__ : Any =OpenLlamaModel(lowerCamelCase )
scaled_model.to(lowerCamelCase )
scaled_model.eval()
lowerCamelCase__ : int =scaled_model(lowerCamelCase ).last_hidden_state
lowerCamelCase__ : Optional[int] =scaled_model(lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-5 ) )
| 238
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[int] = "▁"
_lowercase : Optional[Any] = {"vocab_file": "spiece.model"}
_lowercase : Optional[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
_lowercase : Tuple = {
"google/pegasus-xsum": 5_1_2,
}
_lowercase : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = VOCAB_FILES_NAMES
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Any="<pad>", lowerCamelCase : Optional[Any]="</s>", lowerCamelCase : Any="<unk>", lowerCamelCase : Tuple="<mask_2>", lowerCamelCase : int="<mask_1>", lowerCamelCase : Optional[Any]=None, lowerCamelCase : Dict=103, lowerCamelCase : Optional[Dict[str, Any]] = None, **lowerCamelCase : Optional[int], )-> None:
lowerCamelCase__ : Union[str, Any] =offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase, lowerCamelCase ):
raise TypeError(
F'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is'''
F''' {type(lowerCamelCase )}''' )
lowerCamelCase__ : Any =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(lowerCamelCase ), self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowerCamelCase__ : Optional[Any] =additional_special_tokens_extended
else:
lowerCamelCase__ : Tuple =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2, self.offset )]
lowerCamelCase__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCamelCase, unk_token=lowerCamelCase, mask_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token_sent=lowerCamelCase, offset=lowerCamelCase, additional_special_tokens=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
lowerCamelCase__ : Optional[int] =mask_token_sent
lowerCamelCase__ : Optional[Any] =vocab_file
lowerCamelCase__ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase )
# add special tokens to encoder dict
lowerCamelCase__ : Dict[int, str] ={
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1 )} )
lowerCamelCase__ : Dict[str, int] ={v: k for k, v in self.encoder.items()}
@property
def snake_case ( self : Union[str, Any] )-> int:
return len(self.sp_model ) + self.offset
def snake_case ( self : Optional[Any] )-> Dict[str, int]:
lowerCamelCase__ : List[Any] ={self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str )-> List[Any]:
lowerCamelCase__ : Optional[Any] =self.__dict__.copy()
lowerCamelCase__ : Optional[int] =None
return state
def __setstate__( self : Dict, lowerCamelCase : int )-> Optional[Any]:
lowerCamelCase__ : Optional[int] =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase__ : str ={}
lowerCamelCase__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self : Any, lowerCamelCase : str )-> List[str]:
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase )
def snake_case ( self : int, lowerCamelCase : str )-> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCamelCase__ : Any =self.sp_model.piece_to_id(lowerCamelCase )
return sp_id + self.offset
def snake_case ( self : Tuple, lowerCamelCase : int )-> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCamelCase__ : Any =self.sp_model.IdToPiece(index - self.offset )
return token
def snake_case ( self : List[Any], lowerCamelCase : Optional[int] )-> Any:
lowerCamelCase__ : Optional[int] =[]
lowerCamelCase__ : Tuple =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase ) + token
lowerCamelCase__ : str =[]
else:
current_sub_tokens.append(lowerCamelCase )
out_string += self.sp_model.decode(lowerCamelCase )
return out_string.strip()
def snake_case ( self : Union[str, Any], lowerCamelCase : Union[str, Any]=False )-> List[str]:
return 1
def snake_case ( self : Tuple, lowerCamelCase : Optional[int] )-> Tuple:
lowerCamelCase__ : Tuple =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def snake_case ( self : Any, lowerCamelCase : List, lowerCamelCase : Optional[List] = None, lowerCamelCase : bool = False )-> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def snake_case ( self : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Optional[int]=None )-> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : List[str] =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase, '''wb''' ) as fi:
lowerCamelCase__ : int =self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
| 238
| 1
|
"""simple docstring"""
from math import pow
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
'''simple docstring'''
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
UpperCAmelCase = int(pow(__UpperCamelCase , __UpperCamelCase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
UpperCAmelCase , UpperCAmelCase = backtrack(
__UpperCamelCase , __UpperCamelCase , current_number + 1 , __UpperCamelCase , __UpperCamelCase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
UpperCAmelCase , UpperCAmelCase = backtrack(
__UpperCamelCase , __UpperCamelCase , current_number + 1 , __UpperCamelCase , __UpperCamelCase )
return current_sum, solutions_count
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
raise ValueError(
"""Invalid input\n"""
"""needed_sum must be between 1 and 1000, power between 2 and 10.""" )
return backtrack(__UpperCamelCase , __UpperCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
"""simple docstring"""
from __future__ import annotations
import math
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = str(lowerCAmelCase )
UpperCAmelCase = [n]
for i in range(1 , len(lowerCAmelCase ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if len(str(lowerCAmelCase ) ) > 3:
if not is_prime(int(str(lowerCAmelCase )[-3:] ) ) or not is_prime(int(str(lowerCAmelCase )[:3] ) ):
return False
return True
def _lowerCAmelCase ( lowerCAmelCase = 11 ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = 13
while len(lowerCAmelCase ) != count:
if validate(lowerCAmelCase ):
UpperCAmelCase = list_truncated_nums(lowerCAmelCase )
if all(is_prime(lowerCAmelCase ) for i in list_nums ):
list_truncated_primes.append(lowerCAmelCase )
num += 2
return list_truncated_primes
def _lowerCAmelCase ( ):
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F'{sum(compute_truncated_primes(1_1)) = }')
| 248
| 0
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = '''▁'''
_a = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
_a = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
_a = {
'''facebook/s2t-small-librispeech-asr''': 1_0_2_4,
}
_a = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
_a = {'''mustc''': MUSTC_LANGS}
class A_ ( snake_case__ ):
_lowercase : int = VOCAB_FILES_NAMES
_lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Dict = MAX_MODEL_INPUT_SIZES
_lowercase : int = ['input_ids', 'attention_mask']
_lowercase : List[int] = []
def __init__( self : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any]="<s>" , UpperCAmelCase : Union[str, Any]="</s>" , UpperCAmelCase : int="<pad>" , UpperCAmelCase : Any="<unk>" , UpperCAmelCase : str=False , UpperCAmelCase : Dict=False , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Dict[str, Any]] = None , **UpperCAmelCase : List[str] , ) -> None:
__lowerCAmelCase: Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , do_upper_case=UpperCAmelCase , do_lower_case=UpperCAmelCase , tgt_lang=UpperCAmelCase , lang_codes=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
__lowerCAmelCase: Tuple = do_upper_case
__lowerCAmelCase: List[str] = do_lower_case
__lowerCAmelCase: Dict = load_json(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = {v: k for k, v in self.encoder.items()}
__lowerCAmelCase: Optional[int] = spm_file
__lowerCAmelCase: str = load_spm(UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__lowerCAmelCase: Tuple = lang_codes
__lowerCAmelCase: Any = LANGUAGES[lang_codes]
__lowerCAmelCase: str = [F'''<lang:{lang}>''' for lang in self.langs]
__lowerCAmelCase: Tuple = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
__lowerCAmelCase: List[Any] = self.lang_tokens
__lowerCAmelCase: str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__lowerCAmelCase: Any = {}
@property
def UpperCAmelCase ( self : Any ) -> int:
return len(self.encoder )
@property
def UpperCAmelCase ( self : List[str] ) -> str:
return self._tgt_lang
@tgt_lang.setter
def UpperCAmelCase ( self : Any , UpperCAmelCase : Union[str, Any] ) -> None:
__lowerCAmelCase: Tuple = new_tgt_lang
self.set_tgt_lang_special_tokens(UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : str ) -> None:
__lowerCAmelCase: Tuple = self.lang_code_to_id[tgt_lang]
__lowerCAmelCase: List[Any] = [lang_code_id]
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : str ) -> List[str]:
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Any ) -> int:
return self.encoder.get(UpperCAmelCase , self.encoder[self.unk_token] )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : int ) -> str:
return self.decoder.get(UpperCAmelCase , self.unk_token )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : List[str] ) -> str:
__lowerCAmelCase: Optional[int] = []
__lowerCAmelCase: Optional[int] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__lowerCAmelCase: List[Any] = self.sp_model.decode(UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__lowerCAmelCase: List[Any] = []
else:
current_sub_tokens.append(UpperCAmelCase )
__lowerCAmelCase: Any = self.sp_model.decode(UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any=None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
__lowerCAmelCase: Tuple = [1] * len(self.prefix_tokens )
__lowerCAmelCase: Optional[int] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase )) + ([0] * len(UpperCAmelCase )) + suffix_ones
def UpperCAmelCase ( self : int ) -> Dict:
__lowerCAmelCase: Optional[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Dict:
__lowerCAmelCase: List[Any] = self.__dict__.copy()
__lowerCAmelCase: List[Any] = None
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase : Dict ) -> None:
__lowerCAmelCase: Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowerCAmelCase: List[str] = {}
__lowerCAmelCase: Optional[int] = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
__lowerCAmelCase: Optional[int] = Path(UpperCAmelCase )
assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
__lowerCAmelCase: List[str] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__lowerCAmelCase: int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(UpperCAmelCase , 'wb' ) as fi:
__lowerCAmelCase: Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (str(UpperCAmelCase ), str(UpperCAmelCase ))
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
"""simple docstring"""
__lowerCAmelCase: str = sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE )
spm.Load(str(SCREAMING_SNAKE_CASE ) )
return spm
def _a ( SCREAMING_SNAKE_CASE : str ) -> Union[Dict, List]:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ) -> None:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , indent=2 )
| 322
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=1_3 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=9_9 , UpperCAmelCase : Optional[int]=3_2 , UpperCAmelCase : Dict=5 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=3_7 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=5_1_2 , UpperCAmelCase : Dict=1_6 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : List[Any]=4 , ) -> Optional[Any]:
__lowerCAmelCase: str = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Optional[int] = seq_length
__lowerCAmelCase: Dict = is_training
__lowerCAmelCase: Optional[Any] = use_attention_mask
__lowerCAmelCase: List[Any] = use_token_type_ids
__lowerCAmelCase: Optional[int] = use_labels
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: Optional[Any] = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: List[str] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: List[Any] = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Optional[int] = max_position_embeddings
__lowerCAmelCase: Union[str, Any] = type_vocab_size
__lowerCAmelCase: int = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: Any = num_choices
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: List[Any] = None
if self.use_attention_mask:
__lowerCAmelCase: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Optional[Any] = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase: Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Dict ) -> Any:
__lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = config_and_inputs
__lowerCAmelCase: Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase: List[Any] = FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
for model_class_name in self.all_model_classes:
__lowerCAmelCase: Optional[Any] = model_class_name.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
@require_flax
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: List[Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase: Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
__lowerCAmelCase: str = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , UpperCAmelCase )
__lowerCAmelCase: List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
| 322
| 1
|
"""simple docstring"""
import math
import random
def a__ ( snake_case__ , snake_case__ = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCAmelCase : Dict = 0.0_2
def a__ ( snake_case__ , snake_case__ ) -> float:
lowerCamelCase = float(2 * (random.randint(1 , 1_00 )) - 1 )
for _ in range(snake_case__ ):
# Forward propagation
lowerCamelCase = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
lowerCamelCase = (expected / 1_00) - layer_a
# Error delta
lowerCamelCase = layer_1_error * sigmoid_function(snake_case__ , snake_case__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : Any = int(input("""Expected value: """))
lowerCAmelCase : List[Any] = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
| 168
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase : Dict = 16
lowerCAmelCase : int = 32
def a__ ( snake_case__ ) -> Optional[Any]:
return int(x / 2**20 )
class __magic_name__ :
'''simple docstring'''
def __enter__( self ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowerCamelCase = torch.cuda.memory_allocated()
return self
def __exit__( self , *_a ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
lowerCamelCase = torch.cuda.memory_allocated()
lowerCamelCase = torch.cuda.max_memory_allocated()
lowerCamelCase = bamb(self.end - self.begin )
lowerCamelCase = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a__ ( snake_case__ , snake_case__ = 16 , snake_case__ = "bert-base-cased" , snake_case__ = 3_20 , snake_case__ = 1_60 , ) -> List[str]:
lowerCamelCase = AutoTokenizer.from_pretrained(snake_case__ )
lowerCamelCase = load_dataset(
"""glue""" , """mrpc""" , split={"""train""": F'train[:{n_train}]', """validation""": F'validation[:{n_val}]'} )
def tokenize_function(snake_case__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=snake_case__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case__ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return tokenizer.pad(snake_case__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
def a__ ( snake_case__ , snake_case__ ) -> Any:
# Initialize accelerator
lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase = config["""lr"""]
lowerCamelCase = int(config["""num_epochs"""] )
lowerCamelCase = int(config["""seed"""] )
lowerCamelCase = int(config["""batch_size"""] )
lowerCamelCase = args.model_name_or_path
set_seed(snake_case__ )
lowerCamelCase , lowerCamelCase = get_dataloaders(snake_case__ , snake_case__ , snake_case__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ )
# Instantiate optimizer
lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase = optimizer_cls(params=model.parameters() , lr=snake_case__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCamelCase = 1
lowerCamelCase = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
lowerCamelCase = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCamelCase = 0
# Now we train the model
lowerCamelCase = {}
for epoch in range(snake_case__ , snake_case__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(snake_case__ ):
lowerCamelCase = model(**snake_case__ )
lowerCamelCase = outputs.loss
lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
accelerator.print(
"""Total Peak Memory consumed during the train (max): {}""".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCamelCase = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F'epoch-{epoch}'] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
def a__ ( ) -> str:
lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=snake_case__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=snake_case__ , )
parser.add_argument(
"""--output_dir""" , type=snake_case__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--peak_memory_upper_bound""" , type=snake_case__ , default=snake_case__ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
parser.add_argument(
"""--n_train""" , type=snake_case__ , default=3_20 , help="""Number of training examples to use.""" , )
parser.add_argument(
"""--n_val""" , type=snake_case__ , default=1_60 , help="""Number of validation examples to use.""" , )
parser.add_argument(
"""--num_epochs""" , type=snake_case__ , default=1 , help="""Number of train epochs.""" , )
lowerCamelCase = parser.parse_args()
lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
| 168
| 1
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowercase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Dict ):
UpperCamelCase_ : List[Any] = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
UpperCamelCase_ : List[str] = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
UpperCamelCase_ : List[Any] = F"{src_lang}-{tgt_lang}"
UpperCamelCase_ : List[Any] = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
UpperCamelCase_ : Any = os.path.join(lowerCamelCase , 'README.md' )
print(F"Generating {path}" )
with open(lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase )
# make sure we are under the root of the project
a_ = Path(__file__).resolve().parent.parent.parent
a_ = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ , a_ , a_ = model_name.split('-')
a_ = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 175
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
a_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __lowercase ( lowerCamelCase : Optional[Any] ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : str ):
return max(metric_fn(lowerCamelCase , lowerCamelCase ) for gt in ground_truths )
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Dict ):
UpperCamelCase_ : Tuple = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : List[Any] = []
if args.gold_data_mode == "qa":
UpperCamelCase_ : Union[str, Any] = pd.read_csv(lowerCamelCase , sep='\t' , header=lowerCamelCase )
for answer_list in data[1]:
UpperCamelCase_ : Optional[int] = ast.literal_eval(lowerCamelCase )
answers.append(lowerCamelCase )
else:
UpperCamelCase_ : int = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : Optional[int] = [[reference] for reference in references]
UpperCamelCase_ : Optional[int] = 0
for prediction, ground_truths in zip(lowerCamelCase , lowerCamelCase ):
total += 1
em += metric_max_over_ground_truths(lowerCamelCase , lowerCamelCase , lowerCamelCase )
fa += metric_max_over_ground_truths(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCamelCase_ : Union[str, Any] = 1_0_0.0 * em / total
UpperCamelCase_ : List[Any] = 1_0_0.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __lowercase ( lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : List[str] ):
UpperCamelCase_ : Optional[int] = args.k
UpperCamelCase_ : List[Any] = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : List[str] = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : List[str] = 0
for hypo, reference in zip(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_ : List[str] = set(hypo.split('\t' )[:k] )
UpperCamelCase_ : int = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
UpperCamelCase_ : Union[str, Any] = 1_0_0.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __lowercase ( lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Any ):
def strip_title(lowerCamelCase : List[str] ):
if title.startswith('"' ):
UpperCamelCase_ : List[str] = title[1:]
if title.endswith('"' ):
UpperCamelCase_ : int = title[:-1]
return title
UpperCamelCase_ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase , return_tensors='pt' , padding=lowerCamelCase , truncation=lowerCamelCase , )['input_ids'].to(args.device )
UpperCamelCase_ : int = rag_model.rag.question_encoder(lowerCamelCase )
UpperCamelCase_ : List[str] = question_enc_outputs[0]
UpperCamelCase_ : Tuple = rag_model.retriever(
lowerCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
UpperCamelCase_ : str = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
UpperCamelCase_ : int = []
for docs in all_docs:
UpperCamelCase_ : Union[str, Any] = [strip_title(lowerCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(lowerCamelCase ) )
return provenance_strings
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] ):
with torch.no_grad():
UpperCamelCase_ : List[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase , return_tensors='pt' , padding=lowerCamelCase , truncation=lowerCamelCase )
UpperCamelCase_ : Union[str, Any] = inputs_dict.input_ids.to(args.device )
UpperCamelCase_ : str = inputs_dict.attention_mask.to(args.device )
UpperCamelCase_ : List[Any] = rag_model.generate( # rag_model overwrites generate
lowerCamelCase , attention_mask=lowerCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
UpperCamelCase_ : str = rag_model.retriever.generator_tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
if args.print_predictions:
for q, a in zip(lowerCamelCase , lowerCamelCase ):
logger.info('Q: {} - A: {}'.format(lowerCamelCase , lowerCamelCase ) )
return answers
def __lowercase ( ):
UpperCamelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=lowerCamelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=lowerCamelCase , choices=['exact', 'compressed', 'legacy'] , type=lowerCamelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=lowerCamelCase , type=lowerCamelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=lowerCamelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=lowerCamelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=lowerCamelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=lowerCamelCase , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=lowerCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=lowerCamelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=lowerCamelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=lowerCamelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=lowerCamelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , )
UpperCamelCase_ : Union[str, Any] = parser.parse_args()
UpperCamelCase_ : Union[str, Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def __lowercase ( lowerCamelCase : int ):
UpperCamelCase_ : Any = {}
if args.model_type is None:
UpperCamelCase_ : List[Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
UpperCamelCase_ : Optional[int] = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
UpperCamelCase_ : Dict = args.n_docs
if args.index_name is not None:
UpperCamelCase_ : Union[str, Any] = args.index_name
if args.index_path is not None:
UpperCamelCase_ : str = args.index_path
else:
UpperCamelCase_ : Tuple = BartForConditionalGeneration
UpperCamelCase_ : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , lowerCamelCase )
UpperCamelCase_ : Optional[int] = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
UpperCamelCase_ : Optional[int] = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(lowerCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(lowerCamelCase ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
UpperCamelCase_ : List[str] = RagRetriever.from_pretrained(lowerCamelCase , **lowerCamelCase )
UpperCamelCase_ : List[Any] = model_class.from_pretrained(lowerCamelCase , retriever=lowerCamelCase , **lowerCamelCase )
model.retriever.init_retrieval()
else:
UpperCamelCase_ : Optional[Any] = model_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
UpperCamelCase_ : Optional[Any] = []
for line in tqdm(lowerCamelCase ):
questions.append(line.strip() )
if len(lowerCamelCase ) == args.eval_batch_size:
UpperCamelCase_ : Dict = evaluate_batch_fn(lowerCamelCase , lowerCamelCase , lowerCamelCase )
preds_file.write('\n'.join(lowerCamelCase ) + '\n' )
preds_file.flush()
UpperCamelCase_ : Tuple = []
if len(lowerCamelCase ) > 0:
UpperCamelCase_ : Optional[int] = evaluate_batch_fn(lowerCamelCase , lowerCamelCase , lowerCamelCase )
preds_file.write('\n'.join(lowerCamelCase ) )
preds_file.flush()
score_fn(lowerCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
a_ = get_args()
main(args)
| 175
| 1
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[Any] )-> Optional[int]:
_lowerCamelCase = {}
_lowerCamelCase = tokenizer(example['content'] , truncation=snake_case )['input_ids']
_lowerCamelCase = len(example['content'] ) / len(output['input_ids'] )
return output
A_ : List[str] =HfArgumentParser(PretokenizationArguments)
A_ : Dict =parser.parse_args()
if args.num_workers is None:
A_ : int =multiprocessing.cpu_count()
A_ : List[Any] =AutoTokenizer.from_pretrained(args.tokenizer_dir)
A_ : Optional[Any] =time.time()
A_ : Union[str, Any] =load_dataset(args.dataset_name, split="""train""")
print(f'Dataset loaded in {time.time()-t_start:.2f}s')
A_ : Union[str, Any] =time.time()
A_ : Union[str, Any] =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(f'Dataset tokenized in {time.time()-t_start:.2f}s')
A_ : Dict =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'Data pushed to the hub in {time.time()-t_start:.2f}s')
| 80
|
"""simple docstring"""
from __future__ import annotations
A_ : List[Any] =list[tuple[int, int]]
A_ : Tuple =[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A_ : List[str] =([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class __a :
def __init__( self , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCamelCase = pos_x
_lowerCamelCase = pos_y
_lowerCamelCase = (pos_y, pos_x)
_lowerCamelCase = goal_x
_lowerCamelCase = goal_y
_lowerCamelCase = g_cost
_lowerCamelCase = parent
_lowerCamelCase = self.calculate_heuristic()
def snake_case_ ( self ):
_lowerCamelCase = abs(self.pos_x - self.goal_x )
_lowerCamelCase = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , a__ ):
return self.f_cost < other.f_cost
class __a :
def __init__( self , a__ , a__ ):
_lowerCamelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , a__ )
_lowerCamelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , a__ )
_lowerCamelCase = [self.start]
_lowerCamelCase = []
_lowerCamelCase = False
def snake_case_ ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
_lowerCamelCase = True
return self.retrace_path(a__ )
self.closed_nodes.append(a__ )
_lowerCamelCase = self.get_successors(a__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(a__ )
else:
# retrieve the best current path
_lowerCamelCase = self.open_nodes.pop(self.open_nodes.index(a__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(a__ )
else:
self.open_nodes.append(a__ )
if not self.reached:
return [self.start.pos]
return None
def snake_case_ ( self , a__ ):
_lowerCamelCase = []
for action in delta:
_lowerCamelCase = parent.pos_x + action[1]
_lowerCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(a__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
a__ , a__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , a__ , ) )
return successors
def snake_case_ ( self , a__ ):
_lowerCamelCase = node
_lowerCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCamelCase = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
A_ : str =(0, 0)
A_ : Tuple =(len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
A_ : List[str] =GreedyBestFirst(init, goal)
A_ : Optional[int] =greedy_bf.search()
if path:
for pos_x, pos_y in path:
A_ : Optional[Any] =2
for elem in grid:
print(elem)
| 80
| 1
|
A : Any = 'Tobias Carryer'
from time import time
class A :
'''simple docstring'''
def __init__(self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=int(time() ) ) -> Dict: # noqa: B008
"""simple docstring"""
lowercase__ = multiplier
lowercase__ = increment
lowercase__ = modulo
lowercase__ = seed
def lowerCamelCase__ (self : List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
A : List[Any] = LinearCongruentialGenerator(1_6_6_4_5_2_5, 1_0_1_3_9_0_4_2_2_3, 2 << 3_1)
while True:
print(lcg.next_number())
| 305
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict):
# Initialise PyTorch model
lowercase__ : List[str] = BertConfig.from_json_file(_lowerCamelCase)
print(f'''Building PyTorch model from configuration: {config}''')
lowercase__ : Optional[Any] = BertForPreTraining(_lowerCamelCase)
# Load weights from tf checkpoint
load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''')
torch.save(model.state_dict() , _lowerCamelCase)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 87
| 0
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : int = torch.nn.Linear(1_0 , 1_0 )
_lowerCamelCase : Dict = torch.optim.SGD(model.parameters() , 0.1 )
_lowerCamelCase : Dict = Accelerator()
_lowerCamelCase : int = accelerator.prepare(__lowerCAmelCase )
try:
pickle.loads(pickle.dumps(__lowerCAmelCase ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 175
|
"""simple docstring"""
def snake_case_ ( A_ : list ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = len(A_ )
for i in range(1, A_ ):
_lowerCamelCase : Tuple = collection[i]
_lowerCamelCase : Dict = 0
_lowerCamelCase : Any = i - 1
while low <= high:
_lowerCamelCase : Optional[int] = (low + high) // 2
if val < collection[mid]:
_lowerCamelCase : List[str] = mid - 1
else:
_lowerCamelCase : Dict = mid + 1
for j in range(A_, A_, -1 ):
_lowerCamelCase : Optional[int] = collection[j - 1]
_lowerCamelCase : Tuple = val
return collection
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 175
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any]=3 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : Optional[Any]=10 , __UpperCAmelCase : Optional[int]=[10, 20, 30, 40] , __UpperCAmelCase : Dict=[1, 1, 2, 1] , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Optional[Any]="relu" , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : Optional[int]=None , ):
'''simple docstring'''
_A = parent
_A = batch_size
_A = image_size
_A = num_channels
_A = embeddings_size
_A = hidden_sizes
_A = depths
_A = is_training
_A = use_labels
_A = hidden_act
_A = num_labels
_A = scope
_A = len(__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
_A = FlaxRegNetModel(config=__UpperCAmelCase )
_A = model(__UpperCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : str ):
'''simple docstring'''
_A = self.num_labels
_A = FlaxRegNetForImageClassification(config=__UpperCAmelCase )
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.prepare_config_and_inputs()
_A , _A = config_and_inputs
_A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
snake_case = False
snake_case = False
snake_case = False
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = FlaxRegNetModelTester(self )
_A = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__UpperCAmelCase )
_A = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ):
_A = model_class(__UpperCAmelCase )
_A = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
_A = model_class(__UpperCAmelCase )
@jax.jit
def model_jitted(__UpperCAmelCase : List[Any] , **__UpperCAmelCase : Any ):
return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase )
with self.subTest("JIT Enabled" ):
_A = model_jitted(**__UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_A = model_jitted(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowercase ( ) -> str:
'''simple docstring'''
_A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=__UpperCAmelCase , return_tensors="np" )
_A = model(**__UpperCAmelCase )
# verify the logits
_A = (1, 1000)
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_A = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 79
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case : List[Any] = logging.get_logger(__name__)
class A__(a_ ):
"""simple docstring"""
_A : Optional[Any] = ['''pixel_values''']
def __init__( self , _lowercase = True , _lowercase = None , _lowercase = PILImageResampling.BICUBIC , _lowercase = True , _lowercase = 1 / 255 , _lowercase = True , _lowercase = None , _lowercase = None , _lowercase = True , **_lowercase , ) -> None:
super().__init__(**_lowercase )
a_ : Optional[Any] = size if size is not None else {"""height""": 384, """width""": 384}
a_ : List[str] = get_size_dict(_lowercase , default_to_square=_lowercase )
a_ : str = do_resize
a_ : Optional[int] = size
a_ : Dict = resample
a_ : Optional[int] = do_rescale
a_ : Dict = rescale_factor
a_ : int = do_normalize
a_ : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a_ : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD
a_ : Any = do_convert_rgb
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase = PILImageResampling.BICUBIC , _lowercase = None , **_lowercase , ) -> np.ndarray:
a_ : Union[str, Any] = get_size_dict(_lowercase , default_to_square=_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
a_ : List[str] = (size["""height"""], size["""width"""])
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> Optional[Any]:
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray:
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ) -> PIL.Image.Image:
a_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
a_ : Any = resample if resample is not None else self.resample
a_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
a_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
a_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
a_ : Optional[Any] = image_std if image_std is not None else self.image_std
a_ : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a_ : str = size if size is not None else self.size
a_ : Tuple = get_size_dict(_lowercase , default_to_square=_lowercase )
a_ : Optional[int] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a_ : Optional[Any] = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
a_ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
a_ : Optional[int] = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
a_ : Union[str, Any] = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
a_ : str = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
a_ : Optional[Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
a_ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=_lowercase )
return encoded_outputs
| 248
| 0
|
from __future__ import annotations
import math
def snake_case (__lowercase ) -> Optional[int]:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__SCREAMING_SNAKE_CASE : Optional[int] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def snake_case (__lowercase ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(__a , __a ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
_snake_case : Optional[int] = []
for num in range(len(__a ) ):
_snake_case : List[str] = 0
while 2 * i * i <= odd_composites[num]:
_snake_case : Optional[Any] = odd_composites[num] - 2 * i * i
if is_prime(__a ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__a ) == n:
return list_nums
return []
def snake_case () -> Optional[Any]:
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 355
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'M-CLIP'
def __init__( self , lowercase_=1_024 , lowercase_=768 , **lowercase_ ):
_snake_case : str = transformerDimSize
_snake_case : Union[str, Any] = imageDimSize
super().__init__(**lowercase_ )
class lowercase_ ( __snake_case ):
_lowerCamelCase = MCLIPConfig
def __init__( self , lowercase_ , *lowercase_ , **lowercase_ ):
super().__init__(lowercase_ , *lowercase_ , **lowercase_ )
_snake_case : List[Any] = XLMRobertaModel(lowercase_ )
_snake_case : int = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Tuple = self.transformer(input_ids=lowercase_ , attention_mask=lowercase_ )[0]
_snake_case : Tuple = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(lowercase_ ), embs
| 284
| 0
|
'''simple docstring'''
def _A (lowerCAmelCase__ :int = 2_00 ) -> int:
'''simple docstring'''
_a = [1, 2, 5, 10, 20, 50, 1_00, 2_00]
_a = [0] * (pence + 1)
_a = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(lowerCAmelCase__ , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_0_0) == 7_3_6_8_2
| 168
|
'''simple docstring'''
import sys
a_ : Dict = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def _A (lowerCAmelCase__ :str = N ) -> int:
'''simple docstring'''
_a = -sys.maxsize - 1
for i in range(len(lowerCAmelCase__ ) - 12 ):
_a = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
_a = product
return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 168
| 1
|
from math import isqrt, loga
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE ) if is_prime[i]]
def _a ( SCREAMING_SNAKE_CASE = 80_08_00 , SCREAMING_SNAKE_CASE = 80_08_00 ):
"""simple docstring"""
lowercase__ = degree * loga(SCREAMING_SNAKE_CASE )
lowercase__ = int(SCREAMING_SNAKE_CASE )
lowercase__ = calculate_prime_numbers(SCREAMING_SNAKE_CASE )
lowercase__ = 0
lowercase__ = 0
lowercase__ = len(SCREAMING_SNAKE_CASE ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 93
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = ShapEImgaImgPipeline
_lowercase : Optional[Any] = ['''image''']
_lowercase : Optional[int] = ['''image''']
_lowercase : Optional[int] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
_lowercase : Tuple = False
@property
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
return 8
@property
def lowerCamelCase_ ( self: int ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase__ = CLIPVisionModel(UpperCamelCase_ )
return model
@property
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase__ = PriorTransformer(**UpperCamelCase_ )
return model
@property
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase__ = ShapERenderer(**UpperCamelCase_ )
return model
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ = self.dummy_prior
lowercase__ = self.dummy_image_encoder
lowercase__ = self.dummy_image_processor
lowercase__ = self.dummy_renderer
lowercase__ = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , )
lowercase__ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[int]=0 ) -> Tuple:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase_ )
else:
lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowercase__ = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = '''cpu'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase_ )
lowercase__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
lowercase__ = output.images[0]
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase__ = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self: List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = torch_device == '''cpu'''
lowercase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase_ )
lowercase__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = 1
lowercase__ = 2
lowercase__ = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
lowercase__ = batch_size * [inputs[key]]
lowercase__ = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self: str ) -> str:
"""simple docstring"""
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowercase__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase__ = pipe(
UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 93
| 1
|
'''simple docstring'''
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
if not isinstance(__A , __A ) or number < 0:
raise ValueError("Input must be a non-negative integer" )
UpperCamelCase__ = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80
|
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowercase_ ( enum.Enum ):
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@add_end_docstrings(a__ )
class lowercase_ ( a__ ):
__UpperCAmelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *a , **a ):
super().__init__(*a , **a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCamelCase__ = None
if self.model.config.prefix is not None:
UpperCamelCase__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCamelCase__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._sanitize_parameters(prefix=a , **self._forward_params )
UpperCamelCase__ = {**self._preprocess_params, **preprocess_params}
UpperCamelCase__ = {**self._forward_params, **forward_params}
def __a ( self , a=None , a=None , a=None , a=None , a=None , a=None , a=None , a=None , **a , ):
UpperCamelCase__ = {}
if prefix is not None:
UpperCamelCase__ = prefix
if prefix:
UpperCamelCase__ = self.tokenizer(
a , padding=a , add_special_tokens=a , return_tensors=self.framework )
UpperCamelCase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
UpperCamelCase__ = handle_long_generation
preprocess_params.update(a )
UpperCamelCase__ = generate_kwargs
UpperCamelCase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
UpperCamelCase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
UpperCamelCase__ = ReturnType.TENSORS
if return_type is not None:
UpperCamelCase__ = return_type
if clean_up_tokenization_spaces is not None:
UpperCamelCase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCamelCase__ = self.tokenizer.encode(a , add_special_tokens=a )
if len(a ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
UpperCamelCase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __a ( self , *a , **a ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*a , **a )
def __call__( self , a , **a ):
return super().__call__(a , **a )
def __a ( self , a , a="" , a=None , **a ):
UpperCamelCase__ = self.tokenizer(
prefix + prompt_text , padding=a , add_special_tokens=a , return_tensors=self.framework )
UpperCamelCase__ = prompt_text
if handle_long_generation == "hole":
UpperCamelCase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCamelCase__ = generate_kwargs["max_new_tokens"]
else:
UpperCamelCase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCamelCase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
UpperCamelCase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
UpperCamelCase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
def __a ( self , a , **a ):
UpperCamelCase__ = model_inputs["input_ids"]
UpperCamelCase__ = model_inputs.get("attention_mask" , a )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = 1
else:
UpperCamelCase__ = input_ids.shape[0]
UpperCamelCase__ = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCamelCase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
UpperCamelCase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCamelCase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCamelCase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCamelCase__ = self.model.generate(input_ids=a , attention_mask=a , **a )
UpperCamelCase__ = generated_sequence.shape[0]
if self.framework == "pt":
UpperCamelCase__ = generated_sequence.reshape(a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCamelCase__ = tf.reshape(a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __a ( self , a , a=ReturnType.FULL_TEXT , a=True ):
UpperCamelCase__ = model_outputs["generated_sequence"][0]
UpperCamelCase__ = model_outputs["input_ids"]
UpperCamelCase__ = model_outputs["prompt_text"]
UpperCamelCase__ = generated_sequence.numpy().tolist()
UpperCamelCase__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCamelCase__ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCamelCase__ = self.tokenizer.decode(
a , skip_special_tokens=a , clean_up_tokenization_spaces=a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
UpperCamelCase__ = 0
else:
UpperCamelCase__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=a , clean_up_tokenization_spaces=a , ) )
if return_type == ReturnType.FULL_TEXT:
UpperCamelCase__ = prompt_text + text[prompt_length:]
else:
UpperCamelCase__ = text[prompt_length:]
UpperCamelCase__ = {"generated_text": all_text}
records.append(a )
return records
| 80
| 1
|
'''simple docstring'''
from __future__ import annotations
__snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def a ( __a , __a , __a , __a , __a , ) -> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
UpperCamelCase__ :Any = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__a ) )
] # the reference grid
UpperCamelCase__ :Any = 1
UpperCamelCase__ :Optional[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__a ) )
] # the action grid
UpperCamelCase__ :str = init[0]
UpperCamelCase__ :Tuple = init[1]
UpperCamelCase__ :List[str] = 0
UpperCamelCase__ :Tuple = g + heuristic[x][y] # cost from starting cell to destination cell
UpperCamelCase__ :int = [[f, g, x, y]]
UpperCamelCase__ :Optional[int] = False # flag that is set when search is complete
UpperCamelCase__ :Optional[Any] = False # flag set if we can't find expand
while not found and not resign:
if len(__a ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
UpperCamelCase__ :List[Any] = cell.pop()
UpperCamelCase__ :Dict = next_cell[2]
UpperCamelCase__ :int = next_cell[3]
UpperCamelCase__ :List[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
UpperCamelCase__ :Tuple = True
else:
for i in range(len(__a ) ): # to try out different valid actions
UpperCamelCase__ :List[str] = x + DIRECTIONS[i][0]
UpperCamelCase__ :Optional[int] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
UpperCamelCase__ :Any = g + cost
UpperCamelCase__ :Optional[int] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
UpperCamelCase__ :List[Any] = 1
UpperCamelCase__ :Any = i
UpperCamelCase__ :List[str] = []
UpperCamelCase__ :List[Any] = goal[0]
UpperCamelCase__ :int = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
UpperCamelCase__ :Optional[int] = x - DIRECTIONS[action[x][y]][0]
UpperCamelCase__ :Dict = y - DIRECTIONS[action[x][y]][1]
UpperCamelCase__ :Optional[Any] = xa
UpperCamelCase__ :Tuple = ya
invpath.append([x, y] )
UpperCamelCase__ :Dict = []
for i in range(len(__a ) ):
path.append(invpath[len(__a ) - 1 - i] )
return path, action
if __name__ == "__main__":
__snake_case = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__snake_case = [0, 0]
# all coordinates are given in format [y,x]
__snake_case = [len(grid) - 1, len(grid[0]) - 1]
__snake_case = 1
# the cost map which pushes the path closer to the goal
__snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__snake_case = 99
__snake_case , __snake_case = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 219
|
'''simple docstring'''
from __future__ import annotations
__snake_case = list[list[int]]
# assigning initial values to the grid
__snake_case = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__snake_case = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def a ( __a , __a , __a , __a ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def a ( __a ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def a ( __a ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(__a ):
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__a , __a , __a , __a ):
UpperCamelCase__ :Tuple = digit
if sudoku(__a ) is not None:
return grid
UpperCamelCase__ :Union[str, Any] = 0
return None
def a ( __a ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(__a , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
__snake_case = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 219
| 1
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __lowercase ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : int=1024 , lowerCamelCase : Dict=1024 , lowerCamelCase : Union[str, Any]=False , **lowerCamelCase : str ):
UpperCamelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase )
UpperCamelCase_ : Tuple = SeqaSeqDataset(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , type_path='train' , **lowerCamelCase )
UpperCamelCase_ : List[Any] = tok.pad_token_id
def get_lens(lowerCamelCase : Optional[int] ):
UpperCamelCase_ : Tuple = tqdm(
DataLoader(lowerCamelCase , batch_size=512 , num_workers=8 , shuffle=lowerCamelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCamelCase_ : Any = []
for batch in dl:
UpperCamelCase_ : Union[str, Any] = batch['input_ids'].ne(lowerCamelCase ).sum(1 ).tolist()
UpperCamelCase_ : Optional[Any] = batch['labels'].ne(lowerCamelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCamelCase , lowerCamelCase ):
max_lens.append(max(lowerCamelCase , lowerCamelCase ) )
else:
max_lens.extend(lowerCamelCase )
return max_lens
UpperCamelCase_ : Optional[Any] = get_lens(lowerCamelCase )
UpperCamelCase_ : List[Any] = SeqaSeqDataset(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , type_path='val' , **lowerCamelCase )
UpperCamelCase_ : Optional[Any] = get_lens(lowerCamelCase )
pickle_save(lowerCamelCase , train_ds.len_file )
pickle_save(lowerCamelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 175
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __lowercase ( lowerCamelCase : Dict , lowerCamelCase : int=False ):
try:
UpperCamelCase_ : Union[str, Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase_ : List[str] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase_ : Union[str, Any] = strtobool(lowerCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
a_ = parse_flag_from_env('RUN_SLOW', default=False)
def __lowercase ( lowerCamelCase : List[Any] ):
return unittest.skip('Test was skipped' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : int ):
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : str ):
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Optional[Any] ):
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Any ):
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Any ):
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : str ):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : List[str] ):
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : str ):
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Tuple ):
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Tuple ):
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Optional[Any] ):
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : List[Any] ):
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : int ):
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Any ):
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Tuple ):
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : List[Any]=None , lowerCamelCase : Optional[int]=None ):
if test_case is None:
return partial(lowerCamelCase , version=lowerCamelCase )
return unittest.skipUnless(is_torch_version('>=' , lowerCamelCase ) , F"test requires torch version >= {version}" )(lowerCamelCase )
def __lowercase ( lowerCamelCase : int ):
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : int ):
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Dict ):
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(lowerCamelCase )
a_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __lowercase ( lowerCamelCase : Dict ):
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(lowerCamelCase )
class _lowercase ( unittest.TestCase ):
lowercase = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : str = tempfile.mkdtemp()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any ) -> Union[str, Any]:
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(snake_case )
class _lowercase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _lowercase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Union[mock.Mock, List[mock.Mock]] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : str = mocks if isinstance(snake_case , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __lowercase ( lowerCamelCase : Optional[Any] ):
UpperCamelCase_ : str = AcceleratorState()
UpperCamelCase_ : str = tensor[None].clone().to(state.device )
UpperCamelCase_ : List[Any] = gather(lowerCamelCase ).cpu()
UpperCamelCase_ : Tuple = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCamelCase ):
return False
return True
class _lowercase :
def __init__( self : Optional[int] , snake_case : Any , snake_case : List[Any] , snake_case : int ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : int = returncode
UpperCamelCase_ : Optional[int] = stdout
UpperCamelCase_ : Optional[int] = stderr
async def __lowercase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple ):
while True:
UpperCamelCase_ : Tuple = await stream.readline()
if line:
callback(lowerCamelCase )
else:
break
async def __lowercase ( lowerCamelCase : Dict , lowerCamelCase : Dict=None , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[str]=None , lowerCamelCase : Dict=False , lowerCamelCase : Tuple=False ):
if echo:
print('\nRunning: ' , ' '.join(lowerCamelCase ) )
UpperCamelCase_ : Optional[int] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase_ : str = []
UpperCamelCase_ : Union[str, Any] = []
def tee(lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : List[str]="" ):
UpperCamelCase_ : int = line.decode('utf-8' ).rstrip()
sink.append(lowerCamelCase )
if not quiet:
print(lowerCamelCase , lowerCamelCase , file=lowerCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda lowerCamelCase : tee(lowerCamelCase , lowerCamelCase , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda lowerCamelCase : tee(lowerCamelCase , lowerCamelCase , sys.stderr , label='stderr:' ) ) ),
] , timeout=lowerCamelCase , )
return _RunOutput(await p.wait() , lowerCamelCase , lowerCamelCase )
def __lowercase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any]=None , lowerCamelCase : int=None , lowerCamelCase : Any=180 , lowerCamelCase : Dict=False , lowerCamelCase : Optional[int]=True ):
UpperCamelCase_ : str = asyncio.get_event_loop()
UpperCamelCase_ : Union[str, Any] = loop.run_until_complete(
_stream_subprocess(lowerCamelCase , env=lowerCamelCase , stdin=lowerCamelCase , timeout=lowerCamelCase , quiet=lowerCamelCase , echo=lowerCamelCase ) )
UpperCamelCase_ : int = ' '.join(lowerCamelCase )
if result.returncode > 0:
UpperCamelCase_ : Dict = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class _lowercase ( snake_case_ ):
pass
def __lowercase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[int]=False ):
try:
UpperCamelCase_ : Any = subprocess.check_output(lowerCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCamelCase , 'decode' ):
UpperCamelCase_ : Any = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(lowerCamelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 175
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Optional[int]=7 , UpperCamelCase : int=True , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Any=True , UpperCamelCase : int=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Any=2 , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Any=0 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Any=5 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : int=12 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]="last" , UpperCamelCase : Optional[int]=None , UpperCamelCase : Dict=None , ):
'''simple docstring'''
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : List[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : Optional[Any] = use_input_lengths
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : List[Any] = gelu_activation
__UpperCAmelCase : Optional[Any] = sinusoidal_embeddings
__UpperCAmelCase : Optional[Any] = causal
__UpperCAmelCase : Any = asm
__UpperCAmelCase : List[Any] = n_langs
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : List[Any] = n_special
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Any = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : Optional[int] = num_labels
__UpperCAmelCase : Any = num_choices
__UpperCAmelCase : str = summary_type
__UpperCAmelCase : List[Any] = use_proj
__UpperCAmelCase : List[Any] = scope
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Optional[Any] = None
if self.use_input_lengths:
__UpperCAmelCase : Tuple = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , 2 ).float()
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = FlaubertModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : Any = model(UpperCamelCase , lengths=UpperCamelCase , langs=UpperCamelCase )
__UpperCAmelCase : Dict = model(UpperCamelCase , langs=UpperCamelCase )
__UpperCAmelCase : Tuple = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , ):
'''simple docstring'''
__UpperCAmelCase : List[str] = FlaubertWithLMHeadModel(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : int , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : int , ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = FlaubertForQuestionAnsweringSimple(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(UpperCamelCase )
__UpperCAmelCase : int = model(UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Any , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , ):
'''simple docstring'''
__UpperCAmelCase : str = FlaubertForQuestionAnswering(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = model(
UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , cls_index=UpperCamelCase , is_impossible=UpperCamelCase , p_mask=UpperCamelCase , )
__UpperCAmelCase : Union[str, Any] = model(
UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , cls_index=UpperCamelCase , is_impossible=UpperCamelCase , )
((__UpperCAmelCase) ,) : Optional[Any] = result_with_labels.to_tuple()
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase )
((__UpperCAmelCase) ,) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCamelCase__ ( self : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , ):
'''simple docstring'''
__UpperCAmelCase : Any = FlaubertForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(UpperCamelCase )
__UpperCAmelCase : Any = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : int , ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.num_labels
__UpperCAmelCase : Tuple = FlaubertForTokenClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : str = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : str , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , ):
'''simple docstring'''
__UpperCAmelCase : Any = self.num_choices
__UpperCAmelCase : Dict = FlaubertForMultipleChoice(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Any = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,(
__UpperCAmelCase
) ,
) : Optional[Any] = config_and_inputs
__UpperCAmelCase : List[str] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( A , A , unittest.TestCase ):
"""simple docstring"""
__a = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__a = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : str ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any]=False ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__UpperCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase )
__UpperCAmelCase : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase )
return inputs_dict
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : List[str] = FlaubertModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=UpperCamelCase , emb_dim=37 )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*UpperCamelCase )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*UpperCamelCase )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCamelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCamelCase )
@slow
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Any = FlaubertModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@slow
@require_torch_gpu
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__UpperCAmelCase : int = True
__UpperCAmelCase : List[Any] = model_class(config=UpperCamelCase )
__UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : List[str] = torch.jit.trace(
UpperCamelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCamelCase , os.path.join(UpperCamelCase , """traced_model.pt""" ) )
__UpperCAmelCase : int = torch.jit.load(os.path.join(UpperCamelCase , """traced_model.pt""" ) , map_location=UpperCamelCase )
loaded(inputs_dict["""input_ids"""].to(UpperCamelCase ) , inputs_dict["""attention_mask"""].to(UpperCamelCase ) )
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
__UpperCAmelCase : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
__UpperCAmelCase : Any = model(UpperCamelCase )[0]
__UpperCAmelCase : Union[str, Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCamelCase )
__UpperCAmelCase : Tuple = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase , atol=1e-4 ) )
| 320
|
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {
'b0': efficientnet.EfficientNetBa,
'b1': efficientnet.EfficientNetBa,
'b2': efficientnet.EfficientNetBa,
'b3': efficientnet.EfficientNetBa,
'b4': efficientnet.EfficientNetBa,
'b5': efficientnet.EfficientNetBa,
'b6': efficientnet.EfficientNetBa,
'b7': efficientnet.EfficientNetBa,
}
UpperCAmelCase : List[str] = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = EfficientNetConfig()
__UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""]
__UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""]
__UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""]
__UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""]
__UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""]
__UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""]
__UpperCAmelCase : int = """huggingface/label-files"""
__UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json"""
__UpperCAmelCase : str = 1_0_0_0
__UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
__UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
__UpperCAmelCase : Dict = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
def lowerCamelCase ( _UpperCamelCase : Any ) -> str:
'''simple docstring'''
__UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""]
__UpperCAmelCase : List[str] = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , )
return preprocessor
def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
__UpperCAmelCase : str = sorted(set(_UpperCamelCase ) )
__UpperCAmelCase : Optional[int] = len(_UpperCamelCase )
__UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )}
__UpperCAmelCase : Any = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
__UpperCAmelCase : List[str] = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
__UpperCAmelCase : Optional[int] = {}
for item in rename_keys:
if item[0] in original_param_names:
__UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1]
__UpperCAmelCase : Tuple = """classifier.weight"""
__UpperCAmelCase : Optional[int] = """classifier.bias"""
return key_mapping
def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple:
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
__UpperCAmelCase : List[Any] = key_mapping[key]
if "_conv" in key and "kernel" in key:
__UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
__UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
__UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) )
else:
__UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_UpperCamelCase )
@torch.no_grad()
def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : int = model_classes[model_name](
include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , )
__UpperCAmelCase : List[str] = original_model.trainable_variables
__UpperCAmelCase : List[Any] = original_model.non_trainable_variables
__UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
__UpperCAmelCase : int = param.numpy()
__UpperCAmelCase : Dict = list(tf_params.keys() )
# Load HuggingFace model
__UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval()
__UpperCAmelCase : Any = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
__UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase )
replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Initialize preprocessor and preprocess input image
__UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
__UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase )
__UpperCAmelCase : Any = outputs.logits.detach().numpy()
# Original model inference
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""]
__UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
__UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase )
__UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 )
__UpperCAmelCase : str = original_model.predict(_UpperCamelCase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(_UpperCamelCase ):
os.mkdir(_UpperCamelCase )
# Save converted model and image processor
hf_model.save_pretrained(_UpperCamelCase )
preprocessor.save_pretrained(_UpperCamelCase )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
__UpperCAmelCase : List[str] = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(_UpperCamelCase )
hf_model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
UpperCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
UpperCAmelCase : Any = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 320
| 1
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_snake_case : str = 8
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : int=BITS ):
__lowerCAmelCase = x.device
__lowerCAmelCase = (x * 255).int().clamp(0, 255 )
__lowerCAmelCase = 2 ** torch.arange(bits - 1, -1, -1, device=lowerCAmelCase_ )
__lowerCAmelCase = rearrange(lowerCAmelCase_, 'd -> d 1 1' )
__lowerCAmelCase = rearrange(lowerCAmelCase_, 'b c h w -> b c 1 h w' )
__lowerCAmelCase = ((x & mask) != 0).float()
__lowerCAmelCase = rearrange(lowerCAmelCase_, 'b c d h w -> b (c d) h w' )
__lowerCAmelCase = bits * 2 - 1
return bits
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : Optional[Any]=BITS ):
__lowerCAmelCase = x.device
__lowerCAmelCase = (x > 0).int()
__lowerCAmelCase = 2 ** torch.arange(bits - 1, -1, -1, device=lowerCAmelCase_, dtype=torch.intaa )
__lowerCAmelCase = rearrange(lowerCAmelCase_, 'd -> d 1 1' )
__lowerCAmelCase = rearrange(lowerCAmelCase_, 'b (c d) h w -> b c d h w', d=8 )
__lowerCAmelCase = reduce(x * mask, 'b c d h w -> b c h w', 'sum' )
return (dec / 255).clamp(0.0, 1.0 )
def a_ ( self : Dict, lowerCAmelCase_ : torch.FloatTensor, lowerCAmelCase_ : int, lowerCAmelCase_ : torch.FloatTensor, lowerCAmelCase_ : float = 0.0, lowerCAmelCase_ : bool = True, lowerCAmelCase_ : Optional[int]=None, lowerCAmelCase_ : bool = True, ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__lowerCAmelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__lowerCAmelCase = self.alphas_cumprod[timestep]
__lowerCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__lowerCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__lowerCAmelCase = self.bit_scale
if self.config.clip_sample:
__lowerCAmelCase = torch.clamp(lowerCAmelCase_, -scale, lowerCAmelCase_ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__lowerCAmelCase = self._get_variance(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__lowerCAmelCase = model_output.device if torch.is_tensor(lowerCAmelCase_ ) else 'cpu'
__lowerCAmelCase = torch.randn(model_output.shape, dtype=model_output.dtype, generator=lowerCAmelCase_ ).to(lowerCAmelCase_ )
__lowerCAmelCase = self._get_variance(lowerCAmelCase_, lowerCAmelCase_ ) ** 0.5 * eta * noise
__lowerCAmelCase = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase_, pred_original_sample=lowerCAmelCase_ )
def a_ ( self : List[str], lowerCAmelCase_ : torch.FloatTensor, lowerCAmelCase_ : int, lowerCAmelCase_ : torch.FloatTensor, lowerCAmelCase_ : Any="epsilon", lowerCAmelCase_ : Optional[int]=None, lowerCAmelCase_ : bool = True, ):
__lowerCAmelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__lowerCAmelCase , __lowerCAmelCase = torch.split(lowerCAmelCase_, sample.shape[1], dim=1 )
else:
__lowerCAmelCase = None
# 1. compute alphas, betas
__lowerCAmelCase = self.alphas_cumprod[t]
__lowerCAmelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
__lowerCAmelCase = 1 - alpha_prod_t
__lowerCAmelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__lowerCAmelCase = model_output
else:
raise ValueError(F"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
__lowerCAmelCase = self.bit_scale
if self.config.clip_sample:
__lowerCAmelCase = torch.clamp(lowerCAmelCase_, -scale, lowerCAmelCase_ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__lowerCAmelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowerCAmelCase = 0
if t > 0:
__lowerCAmelCase = torch.randn(
model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=lowerCAmelCase_ ).to(model_output.device )
__lowerCAmelCase = (self._get_variance(lowerCAmelCase_, predicted_variance=lowerCAmelCase_ ) ** 0.5) * noise
__lowerCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCAmelCase_, pred_original_sample=lowerCAmelCase_ )
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , lowerCAmelCase_ : Optional[float] = 1.0 , ) -> List[str]:
super().__init__()
__lowerCAmelCase = bit_scale
__lowerCAmelCase = (
ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] = 2_5_6 , lowerCAmelCase_ : Optional[int] = 2_5_6 , lowerCAmelCase_ : Optional[int] = 5_0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Optional[Any] , ) -> Union[Tuple, ImagePipelineOutput]:
__lowerCAmelCase = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , )
__lowerCAmelCase = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale
__lowerCAmelCase = latents.to(self.device )
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__lowerCAmelCase = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
__lowerCAmelCase = bits_to_decimal(lowerCAmelCase_ )
if output_type == "pil":
__lowerCAmelCase = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
| 284
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Dict, lowerCAmelCase_ : Tuple=1024, lowerCAmelCase_ : Optional[Any]=1024, lowerCAmelCase_ : Tuple=False, **lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = SeqaSeqDataset(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, type_path='train', **lowerCAmelCase_ )
__lowerCAmelCase = tok.pad_token_id
def get_lens(lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = tqdm(
DataLoader(lowerCAmelCase_, batch_size=512, num_workers=8, shuffle=lowerCAmelCase_, collate_fn=ds.collate_fn ), desc=str(ds.len_file ), )
__lowerCAmelCase = []
for batch in dl:
__lowerCAmelCase = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
__lowerCAmelCase = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase_, lowerCAmelCase_ ):
max_lens.append(max(lowerCAmelCase_, lowerCAmelCase_ ) )
else:
max_lens.extend(lowerCAmelCase_ )
return max_lens
__lowerCAmelCase = get_lens(lowerCAmelCase_ )
__lowerCAmelCase = SeqaSeqDataset(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, type_path='val', **lowerCAmelCase_ )
__lowerCAmelCase = get_lens(lowerCAmelCase_ )
pickle_save(lowerCAmelCase_, train_ds.len_file )
pickle_save(lowerCAmelCase_, val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 284
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A : int = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
A : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276
|
import math
def __lowerCamelCase ( __a :int ) -> bool:
"""simple docstring"""
A__ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__a )
def __lowerCamelCase ( __a :float = 1 / 1_2_3_4_5 ) -> int:
"""simple docstring"""
A__ = 0
A__ = 0
A__ = 3
while True:
A__ = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__a ):
A__ = int(__a )
total_partitions += 1
if check_partition_perfect(__a ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__a )
integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
| 276
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
_lowercase : Optional[Any] = None
_lowercase : Optional[int] = logging.get_logger(__name__)
_lowercase : Optional[int] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_lowercase : int = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
_lowercase : int = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
_lowercase : Optional[int] = "▁"
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = BarthezTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : List[str] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowercase_ : int = vocab_file
lowercase_ : Optional[Any] = False if not self.vocab_file else True
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ : Any = [self.cls_token_id]
lowercase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : str = [self.sep_token_id]
lowercase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Any = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 93
|
'''simple docstring'''
import torch
from transformers import AutoModel
class lowerCAmelCase__ ( torch.nn.Module ):
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
lowercase_ : Tuple = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = torch.nn.CosineSimilarity(3 , 1E-0_8 )
lowercase_ : Optional[Any] = torch.nn.Softmax(dim=1 )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = W_supports['''sizes'''].tolist()
lowercase_ : Dict = W_supports['''start_token_id'''].item()
lowercase_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowercase_ : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : str = None
lowercase_ : Dict = None
lowercase_ : Tuple = W_supports['''input_ids'''] == start_token_id
lowercase_ : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
lowercase_ : List[str] = 0
else:
lowercase_ : List[Any] = support_sizes[i - 1]
lowercase_ : str = S[s : s + size][start_token_masks[s : s + size]]
lowercase_ : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
lowercase_ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowercase_ : List[str] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowercase_ : Tuple = torch.vstack((p_starts, p_start) )
lowercase_ : Optional[Any] = torch.vstack((p_ends, p_end) )
else:
lowercase_ : str = p_start
lowercase_ : int = p_end
return p_starts, p_ends
| 93
| 1
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 142
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def __a ( _UpperCamelCase: Tuple , _UpperCamelCase: Optional[int] , _UpperCamelCase: Optional[int]=None , **_UpperCamelCase: Any ) -> Optional[Any]:
"""simple docstring"""
_snake_case = [x.strip() for x in open(_UpperCamelCase ).readlines()]
_snake_case = [x.strip() for x in open(_UpperCamelCase ).readlines()][: len(_UpperCamelCase )]
_snake_case = calculate_rouge(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
if save_path is not None:
save_json(_UpperCamelCase , _UpperCamelCase , indent=_UpperCamelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 142
| 1
|
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = str(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = [n]
for i in range(1 , len(__UpperCamelCase ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> bool:
"""simple docstring"""
if len(str(__UpperCamelCase ) ) > 3:
if not is_prime(int(str(__UpperCamelCase )[-3:] ) ) or not is_prime(int(str(__UpperCamelCase )[:3] ) ):
return False
return True
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 11 ) -> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 13
while len(__UpperCamelCase ) != count:
if validate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = list_truncated_nums(__UpperCamelCase )
if all(is_prime(__UpperCamelCase ) for i in list_nums ):
list_truncated_primes.append(__UpperCamelCase )
num += 2
return list_truncated_primes
def __SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
| 219
|
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
SCREAMING_SNAKE_CASE__ = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE__ = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE__ = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__A = None
__A = logging.get_logger(__name__)
__A = """▁"""
__A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__A = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
__A = {
"""google/pegasus-xsum""": 512,
}
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = VOCAB_FILES_NAMES
__magic_name__ :Dict = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ :Union[str, Any] = PegasusTokenizer
__magic_name__ :Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<pad>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<mask_2>" , __UpperCAmelCase="<mask_1>" , __UpperCAmelCase=None , __UpperCAmelCase=1_0_3 , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = offset
if additional_special_tokens is not None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError(
F"additional_special_tokens should be of type {type(__UpperCAmelCase )}, but is"
F" {type(__UpperCAmelCase )}" )
lowerCAmelCase__ :List[str] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"<unk_{i}>" for i in range(len(__UpperCAmelCase ) , self.offset - 1 )
]
if len(set(__UpperCAmelCase ) ) != len(__UpperCAmelCase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
lowerCAmelCase__ :Any = additional_special_tokens_extended
else:
lowerCAmelCase__ :List[Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset )]
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , pad_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , mask_token_sent=__UpperCAmelCase , offset=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ :Optional[int] = vocab_file
lowerCAmelCase__ :Tuple = False if not self.vocab_file else True
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Dict = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" )
return [1 if x in all_special_ids else 0 for x in seq]
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(__UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(__UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase__ :Optional[Any] = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
| 352
|
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __A (_SCREAMING_SNAKE_CASE = "" ) ->dict[str, float]:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
lowerCAmelCase__ :str = BeautifulSoup(requests.get(_SCREAMING_SNAKE_CASE ).text , 'html.parser' )
lowerCAmelCase__ :List[Any] = soup.find_all('td' , attrs='titleColumn' )
lowerCAmelCase__ :Optional[int] = soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
}
def __A (_SCREAMING_SNAKE_CASE = "IMDb_Top_250_Movies.csv" ) ->None:
"""simple docstring"""
lowerCAmelCase__ :Any = get_imdb_top_aaa_movies()
with open(_SCREAMING_SNAKE_CASE , 'w' , newline='' ) as out_file:
lowerCAmelCase__ :Dict = csv.writer(_SCREAMING_SNAKE_CASE )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 254
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=99 , __UpperCAmelCase=0 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase="last" , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> List[Any]:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_lengths
_a = use_token_type_ids
_a = use_labels
_a = gelu_activation
_a = sinusoidal_embeddings
_a = causal
_a = asm
_a = n_langs
_a = vocab_size
_a = n_special
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = summary_type
_a = use_proj
_a = scope
def _UpperCAmelCase ( self ) -> List[Any]:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_input_lengths:
_a = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , 2 ).float()
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _UpperCAmelCase ( self ) -> Optional[int]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any:
_a = FlaubertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , lengths=__UpperCAmelCase , langs=__UpperCAmelCase )
_a = model(__UpperCAmelCase , langs=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[str]:
_a = FlaubertWithLMHeadModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
_a = FlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase )
_a = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
_a = FlaubertForQuestionAnswering(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase )
_a = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , p_mask=__UpperCAmelCase , )
_a = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , )
((_a) , ) = result_with_labels.to_tuple()
_a = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
((_a) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]:
_a = FlaubertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase )
_a = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
_a = self.num_labels
_a = FlaubertForTokenClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
_a = self.num_choices
_a = FlaubertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : int = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : Dict = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Any:
_a = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def _UpperCAmelCase ( self ) -> List[str]:
_a = FlaubertModelTester(self )
_a = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 )
def _UpperCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = FlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Tuple:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_a = True
_a = model_class(config=__UpperCAmelCase )
_a = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
_a = torch.jit.trace(
__UpperCAmelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__UpperCAmelCase , os.path.join(__UpperCAmelCase , '''traced_model.pt''' ) )
_a = torch.jit.load(os.path.join(__UpperCAmelCase , '''traced_model.pt''' ) , map_location=__UpperCAmelCase )
loaded(inputs_dict['''input_ids'''].to(__UpperCAmelCase ) , inputs_dict['''attention_mask'''].to(__UpperCAmelCase ) )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_a = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
_a = model(__UpperCAmelCase )[0]
_a = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
_a = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
| 320
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[str] = 'bart'
A_ : Optional[Any] = ['past_key_values']
A_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Tuple:
_a = vocab_size
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = classifier_dropout
_a = use_cache
_a = encoder_layers
_a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ):
_a = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'''The config can simply be saved and uploaded again to be fixed.''' )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a = {0: '''batch'''}
_a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''decoder_sequence'''}
_a = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a , _a = self.num_layers
for i in range(__UpperCAmelCase ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = super().outputs
else:
_a = super(__UpperCAmelCase , self ).outputs
if self.use_past:
_a , _a = self.num_layers
for i in range(__UpperCAmelCase ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Generate decoder inputs
_a = seq_length if not self.use_past else 1
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_a = dict(**__UpperCAmelCase , **__UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
_a = common_inputs['''decoder_input_ids'''].shape[1]
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = decoder_seq_length + 3
_a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_a = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
_a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_a , _a = self.num_layers
_a = min(__UpperCAmelCase , __UpperCAmelCase )
_a = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
_a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
) )
# TODO: test this.
_a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a , _a = self.num_layers
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = common_inputs['''attention_mask'''].dtype
_a = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
_a = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
]
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
_a = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
_a = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
elif self.task == "causal-lm":
_a = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
_a = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
| 320
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : List[str] = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : List[str] = 'trocr'
lowercase : int = ['past_key_values']
lowercase : Tuple = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self , SCREAMING_SNAKE_CASE_=5_0265 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Tuple = vocab_size
UpperCamelCase : List[Any] = d_model
UpperCamelCase : Dict = decoder_layers
UpperCamelCase : List[Any] = decoder_attention_heads
UpperCamelCase : Dict = decoder_ffn_dim
UpperCamelCase : Dict = activation_function
UpperCamelCase : Optional[int] = max_position_embeddings
UpperCamelCase : str = dropout
UpperCamelCase : Tuple = attention_dropout
UpperCamelCase : Dict = activation_dropout
UpperCamelCase : str = init_std
UpperCamelCase : Optional[int] = decoder_layerdrop
UpperCamelCase : List[Any] = use_cache
UpperCamelCase : Union[str, Any] = scale_embedding
UpperCamelCase : List[Any] = use_learned_position_embeddings
UpperCamelCase : str = layernorm_embedding
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
| 27
|
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__A : Optional[Any] = logging.get_logger(__name__)
def A_ ( snake_case_ : np.ndarray ,snake_case_ : Union[int, Iterable[int]] ,snake_case_ : bool ,snake_case_ : int ):
'''simple docstring'''
def constraint_to_multiple_of(snake_case_ : Optional[Any] ,snake_case_ : Optional[int] ,snake_case_ : List[str]=0 ,snake_case_ : Optional[Any]=None ):
UpperCamelCase : List[str] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
UpperCamelCase : Optional[Any] = math.floor(val / multiple ) * multiple
if x < min_val:
UpperCamelCase : Dict = math.ceil(val / multiple ) * multiple
return x
UpperCamelCase : Any = (output_size, output_size) if isinstance(snake_case_ ,snake_case_ ) else output_size
UpperCamelCase , UpperCamelCase : int = get_image_size(snake_case_ )
UpperCamelCase , UpperCamelCase : Union[str, Any] = output_size
# determine new height and width
UpperCamelCase : List[str] = output_height / input_height
UpperCamelCase : List[str] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
UpperCamelCase : int = scale_width
else:
# fit height
UpperCamelCase : Optional[Any] = scale_height
UpperCamelCase : int = constraint_to_multiple_of(scale_height * input_height ,multiple=snake_case_ )
UpperCamelCase : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width ,multiple=snake_case_ )
return (new_height, new_width)
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : str = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = size if size is not None else {"""height""": 384, """width""": 384}
UpperCamelCase : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = do_resize
UpperCamelCase : Union[str, Any] = size
UpperCamelCase : Union[str, Any] = keep_aspect_ratio
UpperCamelCase : Any = ensure_multiple_of
UpperCamelCase : List[Any] = resample
UpperCamelCase : str = do_rescale
UpperCamelCase : Optional[Any] = rescale_factor
UpperCamelCase : List[str] = do_normalize
UpperCamelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Tuple = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
UpperCamelCase : Dict = get_resize_output_image_size(
SCREAMING_SNAKE_CASE_ , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=SCREAMING_SNAKE_CASE_ , multiple=SCREAMING_SNAKE_CASE_ , )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase : List[Any] = size if size is not None else self.size
UpperCamelCase : Dict = get_size_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
UpperCamelCase : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
UpperCamelCase : Tuple = resample if resample is not None else self.resample
UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase : Any = image_mean if image_mean is not None else self.image_mean
UpperCamelCase : List[Any] = image_std if image_std is not None else self.image_std
UpperCamelCase : str = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
UpperCamelCase : Tuple = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
UpperCamelCase : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
UpperCamelCase : int = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
UpperCamelCase : List[str] = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase : Union[str, Any] = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = target_sizes.numpy()
UpperCamelCase : Dict = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase : List[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : List[Any] = logits.argmax(dim=1 )
UpperCamelCase : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 27
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__: Any = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__: str = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
A__: Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A__: int = logging.getLogger(__name__)
@dataclass
class A__ :
__UpperCamelCase : str
__UpperCamelCase : List[str]
__UpperCamelCase : Optional[List[str]]
@dataclass
class A__ :
__UpperCamelCase : List[int]
__UpperCamelCase : List[int]
__UpperCamelCase : Optional[List[int]] = None
__UpperCamelCase : Optional[List[int]] = None
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : str = "train"
__UpperCamelCase : Tuple = "dev"
__UpperCamelCase : str = "test"
class A__ :
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[Split, str] ) -> List[InputExample]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :List[InputExample] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Optional[Any]="[CLS]" , SCREAMING_SNAKE_CASE :Optional[int]=1 , SCREAMING_SNAKE_CASE :Any="[SEP]" , SCREAMING_SNAKE_CASE :List[Any]=False , SCREAMING_SNAKE_CASE :Union[str, Any]=False , SCREAMING_SNAKE_CASE :List[str]=0 , SCREAMING_SNAKE_CASE :str=0 , SCREAMING_SNAKE_CASE :Dict=-1_0_0 , SCREAMING_SNAKE_CASE :Optional[int]=0 , SCREAMING_SNAKE_CASE :Tuple=True , ) -> List[InputFeatures]:
'''simple docstring'''
_a : str ={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )}
_a : Tuple =[]
for ex_index, example in enumerate(SCREAMING_SNAKE_CASE ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d of %d""" , SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
_a : Optional[Any] =[]
_a : List[Any] =[]
for word, label in zip(example.words , example.labels ):
_a : Optional[int] =tokenizer.tokenize(SCREAMING_SNAKE_CASE )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(SCREAMING_SNAKE_CASE ) > 0:
tokens.extend(SCREAMING_SNAKE_CASE )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(SCREAMING_SNAKE_CASE ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_a : Optional[int] =tokenizer.num_special_tokens_to_add()
if len(SCREAMING_SNAKE_CASE ) > max_seq_length - special_tokens_count:
_a : List[Any] =tokens[: (max_seq_length - special_tokens_count)]
_a : Tuple =label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_a : Dict =[sequence_a_segment_id] * len(SCREAMING_SNAKE_CASE )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_a : Any =[cls_token] + tokens
_a : Dict =[pad_token_label_id] + label_ids
_a : Union[str, Any] =[cls_token_segment_id] + segment_ids
_a : List[str] =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_a : Optional[int] =[1 if mask_padding_with_zero else 0] * len(SCREAMING_SNAKE_CASE )
# Zero-pad up to the sequence length.
_a : Union[str, Any] =max_seq_length - len(SCREAMING_SNAKE_CASE )
if pad_on_left:
_a : Optional[Any] =([pad_token] * padding_length) + input_ids
_a : Optional[int] =([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_a : Union[str, Any] =([pad_token_segment_id] * padding_length) + segment_ids
_a : Dict =([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_a : Tuple =None
features.append(
InputFeatures(
input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , label_ids=SCREAMING_SNAKE_CASE ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : List[InputFeatures]
__UpperCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self :Dict , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :int=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> List[str]:
'''simple docstring'''
# Load data features from cache or dataset file
_a : Optional[Any] =os.path.join(
SCREAMING_SNAKE_CASE , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a : List[str] =cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE ):
if os.path.exists(SCREAMING_SNAKE_CASE ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
_a : Any =torch.load(SCREAMING_SNAKE_CASE )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
_a : Any =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# TODO clean up all this to leverage built-in features of tokenizers
_a : List[str] =token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"Saving features into cached file {cached_features_file}" )
torch.save(self.features , SCREAMING_SNAKE_CASE )
def __len__( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.features )
def __getitem__( self :Dict , SCREAMING_SNAKE_CASE :int ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class A__ :
__UpperCamelCase : List[InputFeatures]
__UpperCamelCase : int = -100
def __init__( self :str , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> Any:
'''simple docstring'''
_a : Tuple =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# TODO clean up all this to leverage built-in features of tokenizers
_a : List[Any] =token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_a : Union[str, Any] =tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_a : Union[str, Any] =tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
_a : List[Any] =self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self :str ) -> Optional[int]:
'''simple docstring'''
return len(self.features )
def __getitem__( self :int , SCREAMING_SNAKE_CASE :str ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
| 276
| 1
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class __UpperCamelCase ( lowercase__ ):
lowercase : Union[str, Any] = (DPMSolverSDEScheduler,)
lowercase : Dict = 1_0
def a__ ( self :Union[str, Any] ,**_UpperCamelCase :Optional[Any] ):
snake_case_ : Union[str, Any] = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**_UpperCamelCase )
return config
def a__ ( self :int ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def a__ ( self :Tuple ):
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] ,[0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=_UpperCamelCase ,beta_end=_UpperCamelCase )
def a__ ( self :Optional[int] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase )
def a__ ( self :Optional[int] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def a__ ( self :Optional[Any] ):
snake_case_ : Any = self.scheduler_classes[0]
snake_case_ : Dict = self.get_scheduler_config()
snake_case_ : str = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case_ : Dict = self.dummy_model()
snake_case_ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ : Any = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : List[Any] = scheduler.scale_model_input(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Tuple = model(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : List[Any] = scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Any = output.prev_sample
snake_case_ : Tuple = torch.sum(torch.abs(_UpperCamelCase ) )
snake_case_ : List[str] = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1E-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1E-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3
def a__ ( self :str ):
snake_case_ : str = self.scheduler_classes[0]
snake_case_ : Any = self.get_scheduler_config(prediction_type="""v_prediction""" )
snake_case_ : Tuple = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case_ : Optional[int] = self.dummy_model()
snake_case_ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ : int = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : List[str] = scheduler.scale_model_input(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : int = model(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Tuple = scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
snake_case_ : str = output.prev_sample
snake_case_ : Dict = torch.sum(torch.abs(_UpperCamelCase ) )
snake_case_ : Optional[Any] = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1E-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1E-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1E-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1E-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1E-3
def a__ ( self :Any ):
snake_case_ : Tuple = self.scheduler_classes[0]
snake_case_ : List[str] = self.get_scheduler_config()
snake_case_ : Optional[Any] = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps ,device=_UpperCamelCase )
snake_case_ : Optional[Any] = self.dummy_model()
snake_case_ : Optional[int] = self.dummy_sample_deter.to(_UpperCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
snake_case_ : Dict = scheduler.scale_model_input(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : str = model(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : List[str] = scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
snake_case_ : List[str] = output.prev_sample
snake_case_ : Dict = torch.sum(torch.abs(_UpperCamelCase ) )
snake_case_ : str = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1E-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1E-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3
def a__ ( self :Optional[Any] ):
snake_case_ : Dict = self.scheduler_classes[0]
snake_case_ : Any = self.get_scheduler_config()
snake_case_ : List[str] = scheduler_class(**_UpperCamelCase ,use_karras_sigmas=_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps ,device=_UpperCamelCase )
snake_case_ : Dict = self.dummy_model()
snake_case_ : Tuple = self.dummy_sample_deter.to(_UpperCamelCase ) * scheduler.init_noise_sigma
snake_case_ : List[Any] = sample.to(_UpperCamelCase )
for t in scheduler.timesteps:
snake_case_ : Any = scheduler.scale_model_input(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : int = model(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : List[Any] = scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Any = output.prev_sample
snake_case_ : Optional[Any] = torch.sum(torch.abs(_UpperCamelCase ) )
snake_case_ : Dict = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
| 8
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
__A : int = logging.getLogger()
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
snake_case_ : int = parser.parse_args()
return args.f
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """all_results.json""" )
if os.path.exists(lowerCamelCase_ ):
with open(lowerCamelCase_ , """r""" ) as f:
snake_case_ : str = json.load(lowerCamelCase_ )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
__A : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __UpperCamelCase ( lowercase__ ):
@classmethod
def a__ ( cls :Dict ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
snake_case_ : Optional[int] = tempfile.mkdtemp()
snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" )
write_basic_config(save_location=cls.configPath )
snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def a__ ( cls :int ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Optional[int] ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[str] = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
snake_case_ : Dict = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Tuple ):
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
snake_case_ : Optional[int] = get_results(_UpperCamelCase )
self.assertLess(result["""perplexity"""] ,1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Tuple ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[str] = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
self.assertLess(result["""perplexity"""] ,4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : str = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Optional[int] = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
self.assertLess(result["""train_loss"""] ,0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[str] ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : Optional[int] = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] ,2_8 )
self.assertGreaterEqual(result["""eval_exact"""] ,2_8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :List[Any] ):
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : Union[str, Any] = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Union[str, Any] = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :int ):
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case_ : List[Any] = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : int = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 )
self.assertGreaterEqual(result["""eval_rouge2"""] ,2 )
self.assertGreaterEqual(result["""eval_rougeL"""] ,7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :int ):
snake_case_ : Tuple = self.get_auto_remove_tmp_dir()
snake_case_ : Optional[Any] = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : Any = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) )
@slow
def a__ ( self :Optional[Any] ):
snake_case_ : List[str] = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCamelCase )
snake_case_ : Dict = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def a__ ( self :Any ):
snake_case_ : Dict = self.get_auto_remove_tmp_dir()
snake_case_ : Tuple = F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
snake_case_ : str = get_results(_UpperCamelCase )
# The base model scores a 25%
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""image_classification_no_trainer""" ) ) )
| 8
| 1
|
from maths.prime_factors import prime_factors
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : Tuple = f"Input value of [number={number}] must be an integer"
raise TypeError(UpperCAmelCase )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(UpperCAmelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 142
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : Any = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[str] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_A : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 142
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : List[Any] = "longformer"
def __init__( self: Optional[int] ,lowerCamelCase_: Union[List[int], int] = 512 ,lowerCamelCase_: int = 2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: int = 0 ,lowerCamelCase_: int = 2 ,lowerCamelCase_: int = 30522 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 512 ,lowerCamelCase_: int = 2 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: float = 1e-12 ,lowerCamelCase_: bool = False ,**lowerCamelCase_: int ,) -> str:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = attention_window
UpperCAmelCase_ : Optional[Any] = sep_token_id
UpperCAmelCase_ : int = bos_token_id
UpperCAmelCase_ : int = eos_token_id
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : int = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : List[str] = onnx_export
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: Dict ,lowerCamelCase_: "PretrainedConfig" ,lowerCamelCase_: str = "default" ,lowerCamelCase_: "List[PatchingSpec]" = None ) -> Any:
super().__init__(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Any = True
@property
def A__ ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase_ : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def A__ ( self: Any ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase_ : Tuple = super().outputs
if self.task == "default":
UpperCAmelCase_ : int = {0: """batch"""}
return outputs
@property
def A__ ( self: List[str] ) -> float:
return 1e-4
@property
def A__ ( self: Any ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset ,14 )
def A__ ( self: List[str] ,lowerCamelCase_: "PreTrainedTokenizerBase" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[TensorType] = None ,) -> Mapping[str, Any]:
UpperCAmelCase_ : List[Any] = super().generate_dummy_inputs(
preprocessor=lowerCamelCase_ ,batch_size=lowerCamelCase_ ,seq_length=lowerCamelCase_ ,is_pair=lowerCamelCase_ ,framework=lowerCamelCase_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
UpperCAmelCase_ : str = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
UpperCAmelCase_ : Dict = 1
return inputs
| 368
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[int] = "roformer"
def __init__( self: Optional[int] ,lowerCamelCase_: Tuple=50000 ,lowerCamelCase_: Optional[int]=None ,lowerCamelCase_: List[Any]=768 ,lowerCamelCase_: List[Any]=12 ,lowerCamelCase_: Optional[int]=12 ,lowerCamelCase_: Optional[Any]=3072 ,lowerCamelCase_: int="gelu" ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: Any=1536 ,lowerCamelCase_: str=2 ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-12 ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: Any=False ,lowerCamelCase_: Union[str, Any]=True ,**lowerCamelCase_: List[str] ,) -> Tuple:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size if embedding_size is None else embedding_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = type_vocab_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Optional[Any] = rotary_value
UpperCAmelCase_ : str = use_cache
class _snake_case ( __snake_case ):
'''simple docstring'''
@property
def A__ ( self: Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase_ : Optional[Any] = {0: """batch""", 1: """sequence"""}
UpperCAmelCase_ : Any = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 59
| 0
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__UpperCamelCase : str = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = "sequence-classification"
def __init__( self : List[Any] ,lowercase_ : List[str] ):
if type(lowercase_ ) == dict:
lowerCAmelCase__ : int = Namespace(**lowercase_ )
lowerCAmelCase__ : int = glue_output_modes[hparams.task]
lowerCAmelCase__ : List[Any] = glue_tasks_num_labels[hparams.task]
super().__init__(lowercase_ ,lowercase_ ,self.mode )
def __lowerCAmelCase ( self : Dict ,**lowercase_ : Tuple ):
return self.model(**lowercase_ )
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Tuple ,lowercase_ : Dict ):
lowerCAmelCase__ : List[Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase__ : Optional[Any] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowerCAmelCase__ : Optional[int] = self(**lowercase_ )
lowerCAmelCase__ : Union[str, Any] = outputs[0]
lowerCAmelCase__ : Union[str, Any] = self.trainer.lr_schedulers[0]['''scheduler''']
lowerCAmelCase__ : Union[str, Any] = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : Tuple = self.hparams
lowerCAmelCase__ : int = processors[args.task]()
lowerCAmelCase__ : int = processor.get_labels()
for mode in ["train", "dev"]:
lowerCAmelCase__ : int = self._feature_file(lowercase_ )
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' ,lowercase_ )
else:
logger.info('''Creating features from dataset file at %s''' ,args.data_dir )
lowerCAmelCase__ : int = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
lowerCAmelCase__ : Optional[int] = convert_examples_to_features(
lowercase_ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info('''Saving features into cached file %s''' ,lowercase_ )
torch.save(lowercase_ ,lowercase_ )
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : str ,lowercase_ : int ,lowercase_ : bool = False ):
lowerCAmelCase__ : Tuple = '''dev''' if mode == '''test''' else mode
lowerCAmelCase__ : Tuple = self._feature_file(lowercase_ )
logger.info('''Loading features from cached file %s''' ,lowercase_ )
lowerCAmelCase__ : Union[str, Any] = torch.load(lowercase_ )
lowerCAmelCase__ : List[Any] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
lowerCAmelCase__ : Dict = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
lowerCAmelCase__ : Optional[int] = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase__ : Optional[Any] = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase__ : List[Any] = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) ,batch_size=lowercase_ ,shuffle=lowercase_ ,)
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : str ,lowercase_ : Any ):
lowerCAmelCase__ : Optional[int] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase__ : int = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowerCAmelCase__ : int = self(**lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ : str = outputs[:2]
lowerCAmelCase__ : Optional[Any] = logits.detach().cpu().numpy()
lowerCAmelCase__ : Union[str, Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __lowerCAmelCase ( self : int ,lowercase_ : str ):
lowerCAmelCase__ : int = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
lowerCAmelCase__ : Any = np.concatenate([x['''pred'''] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase__ : Any = np.argmax(lowercase_ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase__ : Optional[Any] = np.squeeze(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = np.concatenate([x['''target'''] for x in outputs] ,axis=0 )
lowerCAmelCase__ : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase__ : str = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase__ : Any = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task ,lowercase_ ,lowercase_ )}
lowerCAmelCase__ : Optional[int] = dict(results.items() )
lowerCAmelCase__ : Any = results
return ret, preds_list, out_label_list
def __lowerCAmelCase ( self : Union[str, Any] ,lowercase_ : list ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = self._eval_end(lowercase_ )
lowerCAmelCase__ : str = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __lowerCAmelCase ( self : int ,lowercase_ : Tuple ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = self._eval_end(lowercase_ )
lowerCAmelCase__ : List[str] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __lowerCAmelCase ( lowercase_ : Dict ,lowercase_ : int ):
BaseTransformer.add_model_specific_args(lowercase_ ,lowercase_ )
parser.add_argument(
'''--max_seq_length''' ,default=1_2_8 ,type=lowercase_ ,help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) ,)
parser.add_argument(
'''--task''' ,default='''''' ,type=lowercase_ ,required=lowercase_ ,help='''The GLUE task to run''' ,)
parser.add_argument(
'''--gpus''' ,default=0 ,type=lowercase_ ,help='''The number of GPUs allocated for this, it is by default 0 meaning none''' ,)
parser.add_argument(
'''--overwrite_cache''' ,action='''store_true''' ,help='''Overwrite the cached training and evaluation sets''' )
return parser
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : str = argparse.ArgumentParser()
add_generic_args(A_ , os.getcwd() )
lowerCAmelCase__ : int = GLUETransformer.add_model_specific_args(A_ , os.getcwd() )
lowerCAmelCase__ : Tuple = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCAmelCase__ : Tuple = os.path.join(
'''./results''' , f'{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}' , )
os.makedirs(args.output_dir )
lowerCAmelCase__ : Tuple = GLUETransformer(A_ )
lowerCAmelCase__ : Tuple = generic_train(A_ , A_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCAmelCase__ : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=A_ ) )
lowerCAmelCase__ : Dict = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(A_ )
if __name__ == "__main__":
main()
| 106
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 254
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__A : Dict = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__A : List[str] = TaTokenizerFast
__A : str = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__A : Any = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 27
|
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__A : Any = logging.get_logger(__name__)
__A : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__A : Optional[Any] = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
__A : Any = {'''allegro/herbert-base-cased''': 514}
__A : Optional[Any] = {}
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : Dict = VOCAB_FILES_NAMES
lowercase : Any = PRETRAINED_VOCAB_FILES_MAP
lowercase : List[str] = PRETRAINED_INIT_CONFIGURATION
lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Union[str, Any] = HerbertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_="</s>" , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase : Dict = [self.cls_token_id]
UpperCamelCase : str = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase : Tuple = [self.sep_token_id]
UpperCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase : Optional[int] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
| 27
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : List[str] = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "trajectory_transformer"
A_ = ["past_key_values"]
A_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , __a=100 , __a=5 , __a=1 , __a=1 , __a=249 , __a=6 , __a=17 , __a=25 , __a=4 , __a=4 , __a=128 , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0006 , __a=512 , __a=0.02 , __a=1E-1_2 , __a=1 , __a=True , __a=1 , __a=5_0256 , __a=5_0256 , **__a , ):
'''simple docstring'''
__a : List[Any] = vocab_size
__a : str = action_weight
__a : List[str] = reward_weight
__a : Tuple = value_weight
__a : Union[str, Any] = max_position_embeddings
__a : List[str] = block_size
__a : Optional[int] = action_dim
__a : List[Any] = observation_dim
__a : Union[str, Any] = transition_dim
__a : List[str] = learning_rate
__a : str = n_layer
__a : List[Any] = n_head
__a : Optional[Any] = n_embd
__a : int = embd_pdrop
__a : Optional[Any] = attn_pdrop
__a : Optional[Any] = resid_pdrop
__a : List[Any] = initializer_range
__a : Optional[int] = layer_norm_eps
__a : Any = kaiming_initializer_range
__a : str = use_cache
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
| 27
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , 'embed_dim' ) )
self.parent.assertTrue(hasattr(__a , 'num_heads' ) )
class __UpperCamelCase :
def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ):
'''simple docstring'''
__a : str = parent
__a : List[Any] = batch_size
__a : Optional[int] = image_size
__a : List[str] = patch_sizes
__a : str = patch_stride
__a : Any = patch_padding
__a : Dict = is_training
__a : Union[str, Any] = use_labels
__a : Dict = num_labels
__a : List[Any] = num_channels
__a : Any = embed_dim
__a : int = num_heads
__a : Optional[int] = stride_kv
__a : Dict = depth
__a : List[str] = cls_token
__a : List[Any] = attention_drop_rate
__a : Tuple = initializer_range
__a : int = layer_norm_eps
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Dict = None
if self.use_labels:
# create a random int32 tensor of given shape
__a : str = ids_tensor([self.batch_size] , self.num_labels )
__a : str = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = TFCvtModel(config=__a )
__a : Dict = model(__a , training=__a )
__a : Any = (self.image_size, self.image_size)
__a , __a : Dict = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__a : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__a : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = self.num_labels
__a : Optional[int] = TFCvtForImageClassification(__a )
__a : Dict = model(__a , labels=__a , training=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
A_ = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = TFCvtModelTester(self )
__a : List[Any] = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = tf.keras.mixed_precision.Policy('mixed_float16' )
tf.keras.mixed_precision.set_global_policy(__a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__a )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a ):
__a : List[str] = model_class(__a )
__a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__a : Any = outputs.hidden_states
__a : Union[str, Any] = len(self.model_tester.depth )
self.assertEqual(len(__a ) , __a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Optional[Any] = True
check_hidden_states_output(__a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Optional[Any] = TFCvtModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCamelCase ():
__a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__a : Tuple = self.default_image_processor
__a : Any = prepare_img()
__a : int = image_processor(images=__a , return_tensors='tf' )
# forward pass
__a : Any = model(**__a )
# verify the logits
__a : Any = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__a : Optional[Any] = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
| 27
| 1
|
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
return 1 if input_a == input_a else 0
def a__ ( ) -> None:
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 354
|
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def a__ ( ) -> tuple[list[int], int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = [randint(-10_00 , 10_00 ) for i in range(10 )]
UpperCAmelCase_ : str = randint(-50_00 , 50_00 )
return (arr, r)
_lowerCamelCase = make_dataset()
def a__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ) -> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(_SCREAMING_SNAKE_CASE , 3 ):
if sum(_SCREAMING_SNAKE_CASE ) == target:
return tuple(sorted(_SCREAMING_SNAKE_CASE ) )
return (0, 0, 0)
def a__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ) -> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
UpperCAmelCase_ : Optional[int] = len(_SCREAMING_SNAKE_CASE )
for i in range(n - 1 ):
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def a__ ( ) -> tuple[float, float]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
UpperCAmelCase_ : Optional[Any] = "\ntriplet_sum1(*dataset)\n"
UpperCAmelCase_ : str = "\ntriplet_sum2(*dataset)\n"
UpperCAmelCase_ : Dict = repeat(setup=_SCREAMING_SNAKE_CASE , stmt=_SCREAMING_SNAKE_CASE , repeat=5 , number=1_00_00 )
UpperCAmelCase_ : Dict = repeat(setup=_SCREAMING_SNAKE_CASE , stmt=_SCREAMING_SNAKE_CASE , repeat=5 , number=1_00_00 )
return (min(_SCREAMING_SNAKE_CASE ), min(_SCREAMING_SNAKE_CASE ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCamelCase = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 67
| 0
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = (DPMSolverSDEScheduler,)
SCREAMING_SNAKE_CASE : str = 10
def snake_case__( self : Optional[int] , **_UpperCamelCase : str ) ->Optional[Any]:
snake_case_ = {
'''num_train_timesteps''': 1_1_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_UpperCamelCase )
return config
def snake_case__( self : Optional[Any] ) ->Any:
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def snake_case__( self : List[str] ) ->int:
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase )
def snake_case__( self : List[str] ) ->Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase )
def snake_case__( self : List[Any] ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->int:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
snake_case_ = model(_UpperCamelCase , _UpperCamelCase )
snake_case_ = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
snake_case_ = output.prev_sample
snake_case_ = torch.sum(torch.abs(_UpperCamelCase ) )
snake_case_ = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def snake_case__( self : Optional[Any] ) ->str:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case_ = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
snake_case_ = model(_UpperCamelCase , _UpperCamelCase )
snake_case_ = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
snake_case_ = output.prev_sample
snake_case_ = torch.sum(torch.abs(_UpperCamelCase ) )
snake_case_ = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def snake_case__( self : List[str] ) ->Union[str, Any]:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase )
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter.to(_UpperCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
snake_case_ = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
snake_case_ = model(_UpperCamelCase , _UpperCamelCase )
snake_case_ = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
snake_case_ = output.prev_sample
snake_case_ = torch.sum(torch.abs(_UpperCamelCase ) )
snake_case_ = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase )
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter.to(_UpperCamelCase ) * scheduler.init_noise_sigma
snake_case_ = sample.to(_UpperCamelCase )
for t in scheduler.timesteps:
snake_case_ = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
snake_case_ = model(_UpperCamelCase , _UpperCamelCase )
snake_case_ = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
snake_case_ = output.prev_sample
snake_case_ = torch.sum(torch.abs(_UpperCamelCase ) )
snake_case_ = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
| 8
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase_ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''DPTFeatureExtractor''']
lowerCAmelCase_ = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 8
| 1
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Optional[int] = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class __UpperCamelCase ( __UpperCamelCase ):
lowerCamelCase : List[Any] ="encodec"
def __init__( self , lowerCAmelCase__=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase__=2_4000 , lowerCAmelCase__=1 , lowerCAmelCase__=False , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=128 , lowerCAmelCase__=32 , lowerCAmelCase__=1 , lowerCAmelCase__=[8, 5, 4, 2] , lowerCAmelCase__="weight_norm" , lowerCAmelCase__=7 , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__="reflect" , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=1.0 , lowerCAmelCase__=1024 , lowerCAmelCase__=None , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> Any:
a : Optional[int] = target_bandwidths
a : int = sampling_rate
a : List[Any] = audio_channels
a : str = normalize
a : Union[str, Any] = chunk_length_s
a : Union[str, Any] = overlap
a : Union[str, Any] = hidden_size
a : Union[str, Any] = num_filters
a : Optional[Any] = num_residual_layers
a : List[Any] = upsampling_ratios
a : List[str] = norm_type
a : Union[str, Any] = kernel_size
a : Optional[int] = last_kernel_size
a : Optional[Any] = residual_kernel_size
a : Dict = dilation_growth_rate
a : int = use_causal_conv
a : Tuple = pad_mode
a : str = compress
a : Optional[Any] = num_lstm_layers
a : List[Any] = trim_right_ratio
a : Any = codebook_size
a : int = codebook_dim if codebook_dim is not None else hidden_size
a : int = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**_lowerCAmelCase )
@property
def __a ( self ) -> int:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __a ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __a ( self ) -> Any:
a : List[str] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __a ( self ) -> List[str]:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 370
|
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 100 ) ->int:
'''simple docstring'''
a : Dict = sum(i * i for i in range(1 , n + 1 ) )
a : Tuple = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 79
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCAmelCase_ : Any = logging.getLogger(__name__)
def _A () -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=__lowerCamelCase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=__lowerCamelCase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=__lowerCamelCase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=__lowerCamelCase , default=10_00 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=__lowerCamelCase , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=__lowerCamelCase , default=5_12 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=__lowerCamelCase , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
return args
def _A (__a ) -> Any:
"""simple docstring"""
def fn(__a ):
return tokenizer(examples['''text'''] )
return fn
def _A (__a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
SCREAMING_SNAKE_CASE_ : str = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.train.Features(feature=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : List[str] = tf.train.Example(features=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = example.SerializeToString()
records.append(__lowerCamelCase )
return records
def _A (__a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = min(len(__lowerCamelCase ) , args.limit )
SCREAMING_SNAKE_CASE_ : Tuple = dataset.select(range(__lowerCamelCase ) )
print(f'Limiting the dataset to {args.limit} entries.' )
SCREAMING_SNAKE_CASE_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(args.output_dir , args.split )
if not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
SCREAMING_SNAKE_CASE_ : Any = tokenize_function(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dataset.map(__lowerCamelCase , batched=__lowerCamelCase , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(__a ):
# Concatenate all texts.
SCREAMING_SNAKE_CASE_ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
SCREAMING_SNAKE_CASE_ : str = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
SCREAMING_SNAKE_CASE_ : Any = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
k: [t[i : i + args.max_length] for i in range(0 , __lowerCamelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
SCREAMING_SNAKE_CASE_ : Any = dataset_tokenized.map(__lowerCamelCase , batched=__lowerCamelCase , batch_size=10_00 , num_proc=4 )
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for shard in range(0 , len(__lowerCamelCase ) , args.shard_size ):
SCREAMING_SNAKE_CASE_ : int = grouped_dataset[shard : shard + args.shard_size]
SCREAMING_SNAKE_CASE_ : Any = len(dataset_snapshot['''input_ids'''] )
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(__lowerCamelCase , f'dataset-{shard_count}-{records_containing}.tfrecord' )
SCREAMING_SNAKE_CASE_ : Tuple = get_serialized_examples(__lowerCamelCase )
with tf.io.TFRecordWriter(__lowerCamelCase ) as out_file:
for i in range(len(__lowerCamelCase ) ):
SCREAMING_SNAKE_CASE_ : Dict = serialized_examples[i]
out_file.write(__lowerCamelCase )
print('''Wrote file {} containing {} records'''.format(__lowerCamelCase , __lowerCamelCase ) )
shard_count += 1
total_records += records_containing
with open(f'split-{args.split}-records-count.txt' , '''w''' ) as f:
print(f'Total {args.split} records: {total_records}' , file=__lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = parse_args()
main(args)
| 91
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__lowerCamelCase = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__lowerCamelCase = TaTokenizerFast
__lowerCamelCase = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__lowerCamelCase = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
| 59
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=7 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=18 ,SCREAMING_SNAKE_CASE__=30 ,SCREAMING_SNAKE_CASE__=4_00 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] ,) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = size if size is not None else {'''height''': 18, '''width''': 18}
__SCREAMING_SNAKE_CASE :Tuple = parent
__SCREAMING_SNAKE_CASE :int = batch_size
__SCREAMING_SNAKE_CASE :str = num_channels
__SCREAMING_SNAKE_CASE :List[str] = image_size
__SCREAMING_SNAKE_CASE :Optional[Any] = min_resolution
__SCREAMING_SNAKE_CASE :Union[str, Any] = max_resolution
__SCREAMING_SNAKE_CASE :Dict = do_resize
__SCREAMING_SNAKE_CASE :str = size
__SCREAMING_SNAKE_CASE :str = do_normalize
__SCREAMING_SNAKE_CASE :List[str] = image_mean
__SCREAMING_SNAKE_CASE :str = image_std
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE( A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = DPTImageProcessor if is_vision_available() else None
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = DPTImageProcessingTester(self )
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''image_std''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'''size''' ) )
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''height''': 18, '''width''': 18} )
__SCREAMING_SNAKE_CASE :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'''height''': 42, '''width''': 42} )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE :List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE :List[str] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE :str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE :Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE :Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE :Dict = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE :Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE :Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE :Union[str, Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
| 239
|
"""simple docstring"""
from itertools import product
def __lowerCamelCase ( a_ : int , a_ : int ) -> list[int]:
__SCREAMING_SNAKE_CASE :Tuple = sides_number
__SCREAMING_SNAKE_CASE :List[Any] = max_face_number * dice_number
__SCREAMING_SNAKE_CASE :List[Any] = [0] * (max_total + 1)
__SCREAMING_SNAKE_CASE :Optional[int] = 1
__SCREAMING_SNAKE_CASE :Tuple = range(a_ , max_face_number + 1 )
for dice_numbers in product(a_ , repeat=a_ ):
__SCREAMING_SNAKE_CASE :Any = sum(a_ )
totals_frequencies[total] += 1
return totals_frequencies
def __lowerCamelCase ( ) -> float:
__SCREAMING_SNAKE_CASE :Dict = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__SCREAMING_SNAKE_CASE :Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__SCREAMING_SNAKE_CASE :Optional[Any] = 0
__SCREAMING_SNAKE_CASE :Any = 9
__SCREAMING_SNAKE_CASE :List[str] = 4 * 9
__SCREAMING_SNAKE_CASE :Dict = 6
for peter_total in range(a_ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__SCREAMING_SNAKE_CASE :List[str] = (4**9) * (6**6)
__SCREAMING_SNAKE_CASE :Union[str, Any] = peter_wins_count / total_games_number
__SCREAMING_SNAKE_CASE :str = round(a_ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }')
| 239
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def lowerCamelCase ():
__a : Optional[int] = ArgumentParser(
'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=_SCREAMING_SNAKE_CASE )
__a : Any = parser.add_subparsers(help='datasets-cli command helpers' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
EnvironmentCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
TestCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
RunBeamCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
DummyDataCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
# Parse args
__a , __a : Dict = parser.parse_known_args()
if not hasattr(_SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
__a : List[str] = parse_unknown_args(_SCREAMING_SNAKE_CASE )
# Run
__a : Optional[Any] = args.func(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
| 27
|
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__lowercase : List[Any] = 'bart'
__lowercase : Union[str, Any] = True
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
if LOAD_DENSE_INDEX:
__a : List[Any] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
__a : Dict = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
__a : Optional[int] = qar_model.eval()
else:
__a , __a : str = (None, None)
if MODEL_TYPE == "bart":
__a : Union[str, Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
__a : int = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
__a : Optional[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
__a : str = sas_model.eval()
else:
__a , __a : Tuple = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
if LOAD_DENSE_INDEX:
__a : Optional[Any] = faiss.StandardGpuResources()
__a : Dict = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
__a : int = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
__a : int = faiss.IndexFlatIP(128 )
__a : Any = faiss.index_cpu_to_gpu(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE )
wikiaab_gpu_index_flat.add(_SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU
else:
__a , __a : str = (None, None)
__a : Optional[int] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
__a : Dict = datasets.load_dataset('eli5' , name='LFQA_reddit' )
__a : Dict = elia['train_eli5']
__a : Optional[int] = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
__a : str = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_SCREAMING_SNAKE_CASE )
return (elia_train, eli5_train_q_index)
__lowercase , __lowercase , __lowercase : Any = load_indexes()
__lowercase , __lowercase , __lowercase , __lowercase : Dict = load_models()
__lowercase , __lowercase : int = load_train_data()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str]=10 ):
__a : Optional[int] = embed_questions_for_retrieval([question] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a , __a : Union[str, Any] = eli5_train_q_index.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Any = [elia_train[int(_SCREAMING_SNAKE_CASE )] for i in I[0]]
return nn_examples
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str="wiki40b" , _SCREAMING_SNAKE_CASE : List[str]="dense" , _SCREAMING_SNAKE_CASE : Any=10 ):
if source == "none":
__a , __a : Any = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__a , __a : str = query_qa_dense_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a , __a : Union[str, Any] = query_es_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index_name='english_wiki40b_snippets_100w' , n_results=_SCREAMING_SNAKE_CASE , )
__a : Dict = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
__a : Any = 'question: {} context: {}'.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None),
} )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict=64 , _SCREAMING_SNAKE_CASE : Dict=256 , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Tuple=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.9_5 , _SCREAMING_SNAKE_CASE : str=0.8 ):
with torch.no_grad():
__a : Union[str, Any] = qa_sas_generate(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=_SCREAMING_SNAKE_CASE , min_len=_SCREAMING_SNAKE_CASE , max_len=_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , temp=_SCREAMING_SNAKE_CASE , top_p=_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE , max_input_length=1_024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
__lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
__lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowercase : Dict = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
__lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options')
if demo_options:
__lowercase : Any = st.sidebar.selectbox(
'',
action_list,
index=3,
)
__lowercase : Tuple = action_list.index(action_st)
__lowercase : Tuple = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
__lowercase : List[Any] = show_type == 'Show full text of passages'
else:
__lowercase : int = 3
__lowercase : str = True
__lowercase : Tuple = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
__lowercase : List[Any] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
__lowercase : str = 'wiki40b'
__lowercase : List[Any] = 'dense'
__lowercase : Dict = 'beam'
__lowercase : Optional[int] = 2
__lowercase : List[str] = 64
__lowercase : Tuple = 2_56
__lowercase : List[str] = None
__lowercase : Tuple = None
__lowercase : List[Any] = st.sidebar.checkbox('Generation options')
if generate_options:
__lowercase : Optional[Any] = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
__lowercase : List[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
__lowercase : Tuple = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
__lowercase : int = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
__lowercase : Any = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowercase : Dict = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__lowercase : Union[str, Any] = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__lowercase : List[str] = None
# start main text
__lowercase : int = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
__lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowercase : Any = st.text_input('Enter your question here:', '')
else:
__lowercase : Any = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowercase , __lowercase : Optional[int] = make_support(question, source=wiki_source, method='dense', n_results=10)
__lowercase , __lowercase : List[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
__lowercase : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowercase : str = support_list[:10]
__lowercase : Optional[int] = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
__lowercase , __lowercase : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__lowercase , __lowercase : int = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
__lowercase : str = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
__lowercase : Any = res[1].strip()
if sec_titles == "":
__lowercase : List[str] = '[{}]({})'.format(res[0], wiki_url)
else:
__lowercase : Union[str, Any] = sec_titles.split(' & ')
__lowercase : str = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
__lowercase : str = find_nearest_training(question)
__lowercase : Optional[int] = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
__lowercase : Any = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
__lowercase : List[Any] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 27
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = tempfile.mkdtemp()
# fmt: off
_A = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_A = {
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
_A = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : Any , **__UpperCAmelCase : int ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def lowerCAmelCase ( self : int , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_A = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.get_tokenizer()
_A = self.get_image_processor()
_A = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_A = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_A = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
_A = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
_A = self.prepare_image_inputs()
_A = image_processor(__UpperCAmelCase , return_tensors="np" )
_A = processor(images=__UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
_A = "lower newer"
_A = processor(text=__UpperCAmelCase )
_A = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
_A = "lower newer"
_A = self.prepare_image_inputs()
_A = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(__UpperCAmelCase ):
processor()
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(__UpperCAmelCase )
_A = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
_A = "lower newer"
_A = self.prepare_image_inputs()
_A = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 174
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any]=7 , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : List[str]=18 , __UpperCAmelCase : Union[str, Any]=30 , __UpperCAmelCase : Union[str, Any]=400 , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Union[str, Any]=None , ):
'''simple docstring'''
_A = size if size is not None else {"height": 20, "width": 20}
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = min_resolution
_A = max_resolution
_A = size
_A = do_normalize
_A = do_convert_rgb
_A = [512, 1024, 2048, 4096]
_A = patch_size if patch_size is not None else {"height": 16, "width": 16}
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
_A = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _UpperCAmelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = PixaStructImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_convert_rgb" ) )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.image_processor_tester.prepare_dummy_image()
_A = self.image_processing_class(**self.image_processor_dict )
_A = 2048
_A = image_processor(__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
_A = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__UpperCAmelCase ):
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
_A = "Hello"
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase , header_text=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase , header_text=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _UpperCAmelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = PixaStructImageProcessingTester(self , num_channels=4 )
_A = 3
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_convert_rgb" ) )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 174
| 1
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 343
|
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 67
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str=1_3 , _lowerCAmelCase : Optional[int]=7 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : str=9_9 , _lowerCAmelCase : int=3_2 , _lowerCAmelCase : Union[str, Any]=5 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Union[str, Any]=3_7 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : int=5_1_2 , _lowerCAmelCase : Optional[int]=1_6 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : Tuple=4 , ):
'''simple docstring'''
__lowercase =parent
__lowercase =batch_size
__lowercase =seq_length
__lowercase =is_training
__lowercase =use_attention_mask
__lowercase =use_token_type_ids
__lowercase =use_labels
__lowercase =vocab_size
__lowercase =hidden_size
__lowercase =num_hidden_layers
__lowercase =num_attention_heads
__lowercase =intermediate_size
__lowercase =hidden_act
__lowercase =hidden_dropout_prob
__lowercase =attention_probs_dropout_prob
__lowercase =max_position_embeddings
__lowercase =type_vocab_size
__lowercase =type_sequence_label_size
__lowercase =initializer_range
__lowercase =num_choices
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowercase =None
if self.use_attention_mask:
__lowercase =random_attention_mask([self.batch_size, self.seq_length])
__lowercase =None
if self.use_token_type_ids:
__lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__lowercase =RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase =config_and_inputs
__lowercase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase =config_and_inputs
__lowercase =True
__lowercase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
__lowercase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _UpperCamelCase ( A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =FlaxRobertaPreLayerNormModelTester(self)
@slow
def __lowerCamelCase ( self : int):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowercase =model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_lowerCAmelCase)
__lowercase =model(np.ones((1, 1)))
self.assertIsNotNone(_lowerCAmelCase)
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_lowerCAmelCase)
__lowercase =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa)
__lowercase =model(_lowerCAmelCase)[0]
__lowercase =[1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape) , _lowerCAmelCase)
# compare the actual values for a slice.
__lowercase =np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa)
self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1e-4))
@slow
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_lowerCAmelCase)
__lowercase =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa)
__lowercase =model(_lowerCAmelCase)[0]
# compare the actual values for a slice.
__lowercase =np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa)
self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1e-4))
| 48
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class _UpperCamelCase ( A , A ):
'''simple docstring'''
lowerCAmelCase__ = """resnet"""
lowerCAmelCase__ = ["""basic""", """bottleneck"""]
def __init__( self : Any , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Optional[int]=6_4 , _lowerCAmelCase : str=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , _lowerCAmelCase : Any=[3, 4, 6, 3] , _lowerCAmelCase : List[Any]="bottleneck" , _lowerCAmelCase : List[str]="relu" , _lowerCAmelCase : int=False , _lowerCAmelCase : int=None , _lowerCAmelCase : Any=None , **_lowerCAmelCase : Any , ):
'''simple docstring'''
super().__init__(**_lowerCAmelCase)
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
__lowercase =num_channels
__lowercase =embedding_size
__lowercase =hidden_sizes
__lowercase =depths
__lowercase =layer_type
__lowercase =hidden_act
__lowercase =downsample_in_first_stage
__lowercase =['stem'] + [f"""stage{idx}""" for idx in range(1 , len(_lowerCAmelCase) + 1)]
__lowercase , __lowercase =get_aligned_output_features_output_indices(
out_features=_lowerCAmelCase , out_indices=_lowerCAmelCase , stage_names=self.stage_names)
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
return 1e-3
| 48
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Dict = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowerCamelCase_ = logging.getLogger(__name__)
def __lowercase ( __lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
if os.path.exists(__lowercase ):
if os.path.exists(os.path.join(__lowercase , "config.json" ) ) and os.path.isfile(
os.path.join(__lowercase , "config.json" ) ):
os.remove(os.path.join(__lowercase , "config.json" ) )
if os.path.exists(os.path.join(__lowercase , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(__lowercase , "pytorch_model.bin" ) ):
os.remove(os.path.join(__lowercase , "pytorch_model.bin" ) )
else:
os.makedirs(__lowercase )
model.save_pretrained(__lowercase )
def __lowercase ( __lowercase , __lowercase=False ) -> Optional[int]:
'''simple docstring'''
_A = 2
if unlogit:
_A = torch.pow(__lowercase , __lowercase )
_A = p * torch.log(__lowercase )
_A = 0
return -plogp.sum(dim=-1 )
def __lowercase ( __lowercase ) -> Optional[Any]:
'''simple docstring'''
logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(__lowercase ) ) ) )
for row in range(len(__lowercase ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=True , __lowercase=True , __lowercase=None , __lowercase=False ) -> int:
'''simple docstring'''
_A , _A = model.config.num_hidden_layers, model.config.num_attention_heads
_A = torch.zeros(__lowercase , __lowercase ).to(args.device )
_A = torch.zeros(__lowercase , __lowercase ).to(args.device )
if head_mask is None:
_A = torch.ones(__lowercase , __lowercase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowercase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_A = None
_A = 0.0
_A = 0.0
for step, inputs in enumerate(tqdm(__lowercase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
_A = tuple(t.to(args.device ) for t in inputs )
((_A) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_A = model(__lowercase , labels=__lowercase , head_mask=__lowercase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_A , _A , _A = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowercase ):
_A = entropy(attn.detach() , __lowercase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowercase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_A = 2
_A = torch.pow(torch.pow(__lowercase , __lowercase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
_A = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(__lowercase )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(__lowercase )
logger.info("Head ranked by importance scores" )
_A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_A = torch.arange(
head_importance.numel() , device=args.device )
_A = head_ranks.view_as(__lowercase )
print_ad_tensor(__lowercase )
return attn_entropy, head_importance, total_loss
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[str]:
'''simple docstring'''
_A , _A , _A = compute_heads_importance(__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase )
_A = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , __lowercase , original_score * args.masking_threshold )
_A = torch.ones_like(__lowercase )
_A = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_A = original_score
while current_score >= original_score * args.masking_threshold:
_A = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_A = float("Inf" )
_A = head_importance.view(-1 ).sort()[1]
if len(__lowercase ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
_A = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
_A = new_head_mask.view(-1 )
_A = 0.0
_A = new_head_mask.view_as(__lowercase )
_A = new_head_mask.clone().detach()
print_ad_tensor(__lowercase )
# Compute metric and head importance again
_A , _A , _A = compute_heads_importance(
__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , head_mask=__lowercase )
_A = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , __lowercase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(__lowercase )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
'''simple docstring'''
_A = datetime.now()
_A , _A , _A = compute_heads_importance(
__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase )
_A = 1 / loss
_A = datetime.now() - before_time
_A = sum(p.numel() for p in model.parameters() )
_A = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowercase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowercase , __lowercase ):
_A = [
v,
]
assert sum(len(__lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowercase )
_A = sum(p.numel() for p in model.parameters() )
_A = datetime.now()
_A , _A , _A = compute_heads_importance(
__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase , actually_pruned=__lowercase , )
_A = 1 / loss
_A = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , __lowercase , __lowercase , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , __lowercase , __lowercase )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(__lowercase , args.output_dir )
def __lowercase ( ) -> Union[str, Any]:
'''simple docstring'''
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=__lowercase , type=__lowercase , required=__lowercase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=__lowercase , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=__lowercase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=__lowercase , type=__lowercase , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=__lowercase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=__lowercase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=__lowercase , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=__lowercase , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=__lowercase , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=__lowercase , help="Batch size." )
parser.add_argument("--seed" , type=__lowercase , default=42 )
parser.add_argument("--local_rank" , type=__lowercase , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging." )
_A = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_A = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
_A = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_A = torch.device("cuda" , args.local_rank )
_A = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_A = nn.parallel.DistributedDataParallel(
__lowercase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowercase )
elif args.n_gpu > 1:
_A = nn.DataParallel(__lowercase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowercase )
torch.save(__lowercase , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , __lowercase )
# Prepare dataset
_A = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_A = (torch.from_numpy(__lowercase ),)
_A = TensorDataset(*__lowercase )
_A = RandomSampler(__lowercase )
_A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowercase , __lowercase , __lowercase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_A = mask_heads(__lowercase , __lowercase , __lowercase )
prune_heads(__lowercase , __lowercase , __lowercase , __lowercase )
if __name__ == "__main__":
main()
| 79
| 0
|
from __future__ import annotations
def __UpperCamelCase ( _A : int , _A : int ) ->list[str]:
"""simple docstring"""
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
lowerCamelCase_ =number_of_bytes // partitions
lowerCamelCase_ =[]
for i in range(snake_case__ ):
lowerCamelCase_ =i * bytes_per_partition + 1
lowerCamelCase_ =(
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , )-> str:
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_mask
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =scope
def _snake_case ( self )-> int:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ =None
if self.use_input_mask:
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self )-> Dict:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def _snake_case ( self )-> Tuple:
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) =self.prepare_config_and_inputs()
lowerCamelCase_ =True
lowerCamelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[int]:
lowerCamelCase_ =NezhaModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> Tuple:
lowerCamelCase_ =True
lowerCamelCase_ =NezhaModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> List[str]:
lowerCamelCase_ =NezhaForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple:
lowerCamelCase_ =NezhaForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> int:
lowerCamelCase_ =NezhaForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict:
lowerCamelCase_ =NezhaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> int:
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =NezhaForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict:
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =NezhaForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple:
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =NezhaForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ =model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) =config_and_inputs
lowerCamelCase_ ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
_UpperCamelCase:Optional[int] = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase:int = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase:Tuple = True
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )-> Optional[Any]:
lowerCamelCase_ =super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def _snake_case ( self )-> Dict:
lowerCamelCase_ =NezhaModelTester(self )
lowerCamelCase_ =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def _snake_case ( self )-> List[str]:
self.config_tester.run_common_tests()
def _snake_case ( self )-> str:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> List[Any]:
# This regression test was failing with PyTorch < 1.3
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) =self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase_ =None
self.model_tester.create_and_check_model_as_decoder(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
def _snake_case ( self )-> Dict:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Any:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self )-> Union[str, Any]:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =NezhaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
def _snake_case ( self )-> Any:
lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
lowerCamelCase_ =True
lowerCamelCase_ =model_class(config=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.jit.trace(
_SCREAMING_SNAKE_CASE , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , """bert.pt""" ) )
lowerCamelCase_ =torch.jit.load(os.path.join(_SCREAMING_SNAKE_CASE , """bert.pt""" ) , map_location=_SCREAMING_SNAKE_CASE )
loaded(inputs_dict["""input_ids"""].to(_SCREAMING_SNAKE_CASE ) , inputs_dict["""attention_mask"""].to(_SCREAMING_SNAKE_CASE ) )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
@slow
def _snake_case ( self )-> Dict:
lowerCamelCase_ =NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
lowerCamelCase_ =torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ =torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0]
lowerCamelCase_ =torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
lowerCamelCase_ =torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ =torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0]
lowerCamelCase_ =torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 49
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_lowercase : Dict = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 239
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Optional[Any] , lowercase_ : str ):
lowercase_ : int = data
def __iter__( self : int ):
for element in self.data:
yield element
def lowerCamelCase ( UpperCAmelCase__ : Any=True ) -> Any:
lowercase_ : Optional[int] = Accelerator(even_batches=UpperCAmelCase__ )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def lowerCamelCase ( UpperCAmelCase__ : Accelerator , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : bool = False ) -> Optional[Any]:
if iterable:
lowercase_ : Dict = DummyIterableDataset(torch.as_tensor(range(UpperCAmelCase__ ) ) )
else:
lowercase_ : Union[str, Any] = TensorDataset(torch.as_tensor(range(UpperCAmelCase__ ) ) )
lowercase_ : Any = DataLoader(UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
lowercase_ : Any = accelerator.prepare(UpperCAmelCase__ )
return dl
def lowerCamelCase ( UpperCAmelCase__ : Accelerator , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , ) -> int:
lowercase_ : List[str] = create_dataloader(accelerator=UpperCAmelCase__ , dataset_size=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
lowercase_ : Tuple = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def lowerCamelCase ( ) -> int:
lowercase_ : Tuple = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
UpperCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
UpperCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : Optional[int] = create_accelerator(even_batches=UpperCAmelCase__ )
verify_dataloader_batch_sizes(
UpperCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
UpperCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = create_accelerator(even_batches=UpperCAmelCase__ )
lowercase_ : Dict = torch.nn.Linear(1 , 1 )
lowercase_ : Optional[Any] = accelerator.prepare(UpperCAmelCase__ )
lowercase_ : List[Any] = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
lowercase_ : Optional[Any] = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(UpperCAmelCase__ ):
lowercase_ : Any = ddp_model(batch[0].float() )
lowercase_ : List[str] = output.sum()
loss.backward()
batch_idxs.append(UpperCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def lowerCamelCase ( UpperCAmelCase__ : List[str] ) -> List[str]:
with warnings.catch_warnings(record=UpperCAmelCase__ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , UpperCAmelCase__ )
assert "only supported for multi-GPU" in str(w[-1].message )
def lowerCamelCase ( ) -> Any:
lowercase_ : str = True
lowercase_ : Tuple = False
lowercase_ : str = create_accelerator(even_batches=UpperCAmelCase__ )
lowercase_ : Union[str, Any] = torch.nn.Linear(1 , 1 )
lowercase_ : Any = accelerator.prepare(UpperCAmelCase__ )
lowercase_ : Optional[int] = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
lowercase_ : List[Any] = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = train_dl.batch_sampler.even_batches
lowercase_ : List[str] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def lowerCamelCase ( ) -> Dict:
lowercase_ : str = True
lowercase_ : Optional[Any] = False
lowercase_ : Union[str, Any] = create_accelerator(even_batches=UpperCAmelCase__ )
lowercase_ : Optional[int] = torch.nn.Linear(1 , 1 )
lowercase_ : Optional[int] = accelerator.prepare(UpperCAmelCase__ )
create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=UpperCAmelCase__ )
lowercase_ : List[str] = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("""ignore""" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase__ ):
lowercase_ : Optional[Any] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[Any] = create_accelerator()
lowercase_ : Optional[int] = torch.nn.Linear(1 , 1 )
lowercase_ : List[Any] = accelerator.prepare(UpperCAmelCase__ )
create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=UpperCAmelCase__ )
with warnings.catch_warnings(record=UpperCAmelCase__ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase__ ):
pass
assert issubclass(w[-1].category , UpperCAmelCase__ )
assert "only supported for map-style datasets" in str(w[-1].message )
def lowerCamelCase ( ) -> List[str]:
lowercase_ : List[Any] = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
lowercase_ : List[Any] = accelerator.state.distributed_type
lowercase_ : Union[str, Any] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(UpperCAmelCase__ )
lowercase_ : str = original_state
if __name__ == "__main__":
main()
| 239
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=18 , UpperCamelCase__=30 , UpperCamelCase__=400 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=None , ) -> str:
'''simple docstring'''
A_ = size if size is not None else {"""shortest_edge""": 20}
A_ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = min_resolution
A_ = max_resolution
A_ = do_resize
A_ = size
A_ = do_center_crop
A_ = crop_size
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class A__ ( _snake_case , unittest.TestCase ):
lowercase = MobileNetVaImageProcessor if is_vision_available() else None
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = MobileNetVaImageProcessingTester(self )
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_center_crop""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """crop_size""" ) )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 101
|
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00 ) -> int:
A_ = n * (n + 1) * (2 * n + 1) / 6
A_ = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 101
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Any = MgpstrTokenizer
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Any = {}
__UpperCamelCase : Union[str, Any] = False
def _snake_case (self ):
super().setUp()
# fmt: off
__lowerCAmelCase = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
def _snake_case (self , **__lowercase ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self , __lowercase ):
__lowerCAmelCase = '''tester'''
__lowerCAmelCase = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def _snake_case (self ):
pass
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__lowerCAmelCase = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__lowerCAmelCase = tokenizer.encode([special_token] , add_special_tokens=__lowercase )
self.assertEqual(len(__lowercase ) , 1 )
__lowerCAmelCase = tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
self.assertTrue(special_token not in decoded )
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__lowerCAmelCase , __lowerCAmelCase = self.get_input_output_texts(__lowercase )
__lowerCAmelCase = tokenizer.tokenize(__lowercase )
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowercase )
__lowerCAmelCase = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowercase )
self.assertNotEqual(len(__lowercase ) , 0 )
__lowerCAmelCase = tokenizer.decode(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , __lowercase )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def _snake_case (self ):
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def _snake_case (self ):
pass
| 174
|
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_UpperCAmelCase : int = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class a__ ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCamelCase : Optional[datasets.Features] = None
def __magic_name__( lowerCamelCase, lowerCamelCase, ):
import pyspark
def generate_fn():
__lowerCAmelCase = df.select('''*''', pyspark.sql.functions.spark_partition_id().alias('''part_id'''))
for partition_id in partition_order:
__lowerCAmelCase = df_with_partition_id.select('''*''').where(F"""part_id = {partition_id}""").drop('''part_id''')
__lowerCAmelCase = partition_df.collect()
__lowerCAmelCase = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class a__ ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=None , ):
__lowerCAmelCase = df
__lowerCAmelCase = partition_order or range(self.df.rdd.getNumPartitions() )
__lowerCAmelCase = _generate_iterable_examples(self.df , self.partition_order )
def __iter__(self ):
yield from self.generate_examples_fn()
def _snake_case (self , __lowercase ):
__lowerCAmelCase = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__lowercase )
return SparkExamplesIterable(self.df , partition_order=__lowercase )
def _snake_case (self , __lowercase , __lowercase ):
__lowerCAmelCase = self.split_shard_indices_by_worker(__lowercase , __lowercase )
return SparkExamplesIterable(self.df , partition_order=__lowercase )
@property
def _snake_case (self ):
return len(self.partition_order )
class a__ ( datasets.DatasetBuilder ):
"""simple docstring"""
__UpperCamelCase : int = SparkConfig
def __init__(self , __lowercase , __lowercase = None , __lowercase = None , **__lowercase , ):
import pyspark
__lowerCAmelCase = pyspark.sql.SparkSession.builder.getOrCreate()
__lowerCAmelCase = df
__lowerCAmelCase = working_dir
super().__init__(
cache_dir=__lowercase , config_name=str(self.df.semanticHash() ) , **__lowercase , )
def _snake_case (self ):
# Returns the path of the created file.
def create_cache_and_write_probe(__lowercase ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__lowercase )
__lowerCAmelCase = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__lowercase , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__lowerCAmelCase = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowercase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def _snake_case (self ):
return datasets.DatasetInfo(features=self.config.features )
def _snake_case (self , __lowercase ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _snake_case (self , __lowercase ):
import pyspark
def get_arrow_batch_size(__lowercase ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
__lowerCAmelCase = self.df.count()
__lowerCAmelCase = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__lowerCAmelCase = (
self.df.limit(__lowercase )
.repartition(1 )
.mapInArrow(__lowercase , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__lowerCAmelCase = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__lowerCAmelCase = min(__lowercase , int(approx_total_size / max_shard_size ) )
__lowerCAmelCase = self.df.repartition(__lowercase )
def _snake_case (self , __lowercase , __lowercase , __lowercase , ):
import pyspark
__lowerCAmelCase = ParquetWriter if file_format == '''parquet''' else ArrowWriter
__lowerCAmelCase = os.path.join(self._working_dir , os.path.basename(__lowercase ) ) if self._working_dir else fpath
__lowerCAmelCase = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__lowerCAmelCase = self.config.features
__lowerCAmelCase = self._writer_batch_size
__lowerCAmelCase = self._fs.storage_options
def write_arrow(__lowercase ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__lowerCAmelCase = pyspark.TaskContext().taskAttemptId()
__lowerCAmelCase = next(__lowercase , __lowercase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
__lowerCAmelCase = 0
__lowerCAmelCase = writer_class(
features=__lowercase , path=working_fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , writer_batch_size=__lowercase , storage_options=__lowercase , embed_local_files=__lowercase , )
__lowerCAmelCase = pa.Table.from_batches([first_batch] )
writer.write_table(__lowercase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
__lowerCAmelCase = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , writer_batch_size=__lowercase , storage_options=__lowercase , embed_local_files=__lowercase , )
__lowerCAmelCase = pa.Table.from_batches([batch] )
writer.write_table(__lowercase )
if writer._num_bytes > 0:
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__lowercase ) ):
__lowerCAmelCase = os.path.join(os.path.dirname(__lowercase ) , os.path.basename(__lowercase ) )
shutil.move(__lowercase , __lowercase )
__lowerCAmelCase = (
self.df.mapInArrow(__lowercase , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _snake_case (self , __lowercase , __lowercase = "arrow" , __lowercase = None , __lowercase = None , **__lowercase , ):
self._validate_cache_dir()
__lowerCAmelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__lowercase )
__lowerCAmelCase = not is_remote_filesystem(self._fs )
__lowerCAmelCase = os.path.join if is_local else posixpath.join
__lowerCAmelCase = '''-TTTTT-SSSSS-of-NNNNN'''
__lowerCAmelCase = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
__lowerCAmelCase = path_join(self._output_dir , __lowercase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = []
__lowerCAmelCase = []
for task_id, content in self._prepare_split_single(__lowercase , __lowercase , __lowercase ):
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__lowercase )
__lowerCAmelCase = total_num_examples
__lowerCAmelCase = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
__lowerCAmelCase = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__lowerCAmelCase = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowercase , __lowercase , __lowercase , ):
rename(
__lowercase , fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , fpath.replace('''TTTTT-SSSSS''' , F"""{global_shard_id:05d}""" ).replace('''NNNNN''' , F"""{total_shards:05d}""" ) , )
__lowerCAmelCase = []
__lowerCAmelCase = 0
for i in range(len(__lowercase ) ):
__lowerCAmelCase , __lowerCAmelCase = task_id_and_num_shards[i]
for shard_id in range(__lowercase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__lowercase , len(__lowercase ) ).map(lambda __lowercase : _rename_shard(*__lowercase ) ).collect()
else:
# don't use any pattern
__lowerCAmelCase = 0
__lowerCAmelCase = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , fpath.replace(__lowercase , '''''' ) , )
def _snake_case (self , __lowercase , ):
return SparkExamplesIterable(self.df )
| 174
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = ["pixel_values"]
def __init__(self , __a = True , __a = None , __a = 0.9 , __a = PILImageResampling.BICUBIC , __a = True , __a = None , __a = 1 / 2_55 , __a = True , __a = True , __a = None , __a = None , **__a , ) -> None:
super().__init__(**__a )
UpperCamelCase = size if size is not None else {"shortest_edge": 2_24}
UpperCamelCase = get_size_dict(__a , default_to_square=__a )
UpperCamelCase = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCamelCase = get_size_dict(__a , param_name="crop_size" )
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = crop_pct
UpperCamelCase = resample
UpperCamelCase = do_center_crop
UpperCamelCase = crop_size
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_normalize
UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def snake_case_ (self , __a , __a , __a = None , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray:
UpperCamelCase = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
UpperCamelCase = int(size["shortest_edge"] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
UpperCamelCase = int(size["height"] / crop_pct )
else:
UpperCamelCase = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
else:
raise ValueError("Invalid size for resize: {}".format(__a ) )
UpperCamelCase = get_resize_output_image_size(__a , size=__a , default_to_square=__a )
else:
if "shortest_edge" in size:
UpperCamelCase = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a )
elif "height" in size and "width" in size:
UpperCamelCase = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(__a ) )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def snake_case_ (self , __a , __a , __a = None , **__a , ) -> np.ndarray:
UpperCamelCase = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def snake_case_ (self , __a , __a , __a = None , **__a , ) -> List[Any]:
return rescale(__a , scale=__a , data_format=__a , **__a )
def snake_case_ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def snake_case_ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image:
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(__a , default_to_square=__a )
UpperCamelCase = crop_size if crop_size is not None else self.crop_size
UpperCamelCase = get_size_dict(__a , param_name="crop_size" )
UpperCamelCase = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(__a ) for image in images]
if do_resize:
UpperCamelCase = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a ) for image in images]
if do_center_crop:
UpperCamelCase = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
UpperCamelCase = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCamelCase = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
| 350
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (position - 1) // 2
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (2 * position) + 1
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (2 * position) + 2
class _lowerCamelCase ( Generic[T] ):
def __init__(self ) -> None:
UpperCamelCase = []
UpperCamelCase = {}
UpperCamelCase = 0
def __len__(self ) -> int:
return self.elements
def __repr__(self ) -> str:
return str(self.heap )
def snake_case_ (self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def snake_case_ (self , __a , __a ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
UpperCamelCase = self.elements
self.elements += 1
self._bubble_up(__a )
def snake_case_ (self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
UpperCamelCase , UpperCamelCase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
UpperCamelCase , UpperCamelCase = self.heap[0]
self._bubble_down(__a )
return elem
def snake_case_ (self , __a , __a ) -> None:
# Update the weight of the given key
UpperCamelCase = self.position_map[elem]
UpperCamelCase = (elem, weight)
if position > 0:
UpperCamelCase = get_parent_position(__a )
UpperCamelCase , UpperCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__a )
else:
self._bubble_down(__a )
else:
self._bubble_down(__a )
def snake_case_ (self , __a ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
UpperCamelCase = self.position_map[elem]
if curr_pos == 0:
return None
UpperCamelCase = get_parent_position(__a )
UpperCamelCase , UpperCamelCase = self.heap[curr_pos]
UpperCamelCase , UpperCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__a , __a )
return self._bubble_up(__a )
return None
def snake_case_ (self , __a ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
UpperCamelCase = self.position_map[elem]
UpperCamelCase , UpperCamelCase = self.heap[curr_pos]
UpperCamelCase = get_child_left_position(__a )
UpperCamelCase = get_child_right_position(__a )
if child_left_position < self.elements and child_right_position < self.elements:
UpperCamelCase , UpperCamelCase = self.heap[child_left_position]
UpperCamelCase , UpperCamelCase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__a , __a )
return self._bubble_down(__a )
if child_left_position < self.elements:
UpperCamelCase , UpperCamelCase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__a , __a )
return self._bubble_down(__a )
else:
return None
if child_right_position < self.elements:
UpperCamelCase , UpperCamelCase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__a , __a )
return self._bubble_down(__a )
return None
def snake_case_ (self , __a , __a ) -> None:
# Swap the nodes at the given positions
UpperCamelCase = self.heap[nodea_pos][0]
UpperCamelCase = self.heap[nodea_pos][0]
UpperCamelCase , UpperCamelCase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
UpperCamelCase = nodea_pos
UpperCamelCase = nodea_pos
class _lowerCamelCase ( Generic[T] ):
def __init__(self ) -> None:
UpperCamelCase = {}
UpperCamelCase = 0
def __repr__(self ) -> str:
return str(self.connections )
def __len__(self ) -> int:
return self.nodes
def snake_case_ (self , __a ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
UpperCamelCase = {}
self.nodes += 1
def snake_case_ (self , __a , __a , __a ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__a )
self.add_node(__a )
UpperCamelCase = weight
UpperCamelCase = weight
def a__ ( _SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase = {node: maxsize for node in graph.connections}
UpperCamelCase = {node: None for node in graph.connections}
UpperCamelCase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if priority_queue.is_empty():
return dist, parent
# initialization
UpperCamelCase = priority_queue.extract_min()
UpperCamelCase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] )
UpperCamelCase = node
# running prim's algorithm
while not priority_queue.is_empty():
UpperCamelCase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] )
UpperCamelCase = node
return dist, parent
| 244
| 0
|
from sklearn.metrics import fa_score
import datasets
SCREAMING_SNAKE_CASE__ : Optional[int] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
SCREAMING_SNAKE_CASE__ : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
SCREAMING_SNAKE_CASE__ : Optional[int] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ (datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=1 , UpperCamelCase__="binary" , UpperCamelCase__=None ) -> Union[str, Any]:
lowerCamelCase : Dict = fa_score(
UpperCamelCase__ , UpperCamelCase__ , labels=UpperCamelCase__ , pos_label=UpperCamelCase__ , average=UpperCamelCase__ , sample_weight=UpperCamelCase__ )
return {"f1": float(UpperCamelCase__ ) if score.size == 1 else score}
| 48
|
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> list:
lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = []
for i in range(len(_SCREAMING_SNAKE_CASE ) - pat_len + 1 ):
lowerCamelCase : Dict = True
for j in range(_SCREAMING_SNAKE_CASE ):
if s[i + j] != pattern[j]:
lowerCamelCase : Optional[int] = False
break
if match_found:
position.append(_SCREAMING_SNAKE_CASE )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 48
| 1
|
import os
def SCREAMING_SNAKE_CASE ( snake_case_ : str = "input.txt" ):
with open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) as input_file:
snake_case__ : Dict = [
[int(snake_case_ ) for element in line.split("," )]
for line in input_file.readlines()
]
snake_case__ : Optional[int] = len(snake_case_ )
snake_case__ : Any = len(matrix[0] )
snake_case__ : Optional[int] = [[-1 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
for i in range(snake_case_ ):
snake_case__ : str = matrix[i][0]
for j in range(1 , snake_case_ ):
for i in range(snake_case_ ):
snake_case__ : int = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , snake_case_ ):
snake_case__ : List[str] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
snake_case__ : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"{solution() = }")
| 286
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "xmod"
def __init__( self : List[str] , __A : List[str]=3_0_5_2_2 , __A : Tuple=7_6_8 , __A : str=1_2 , __A : List[Any]=1_2 , __A : List[str]=3_0_7_2 , __A : List[str]="gelu" , __A : List[Any]=0.1 , __A : Tuple=0.1 , __A : str=5_1_2 , __A : Union[str, Any]=2 , __A : List[Any]=0.0_2 , __A : List[str]=1e-1_2 , __A : Tuple=1 , __A : List[Any]=0 , __A : Optional[Any]=2 , __A : Optional[int]="absolute" , __A : Optional[int]=True , __A : Dict=None , __A : Optional[int]=False , __A : Dict=2 , __A : List[str]=False , __A : Dict=True , __A : Union[str, Any]=True , __A : Tuple=("en_XX",) , __A : Optional[Any]=None , **__A : Tuple , ):
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
snake_case__ : Tuple = vocab_size
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : int = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : Tuple = hidden_act
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : Union[str, Any] = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : List[str] = initializer_range
snake_case__ : Dict = layer_norm_eps
snake_case__ : str = position_embedding_type
snake_case__ : List[str] = use_cache
snake_case__ : Tuple = classifier_dropout
snake_case__ : Any = pre_norm
snake_case__ : List[str] = adapter_reduction_factor
snake_case__ : List[Any] = adapter_layer_norm
snake_case__ : str = adapter_reuse_layer_norm
snake_case__ : Union[str, Any] = ln_before_adapter
snake_case__ : Tuple = list(__A )
snake_case__ : int = default_language
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
@property
def _lowercase ( self : int ):
if self.task == "multiple-choice":
snake_case__ : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case__ : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 286
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = StableDiffusionPanoramaPipeline
snake_case = TEXT_TO_IMAGE_PARAMS
snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
_A = DDIMScheduler()
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_A = CLIPTextModel(__UpperCAmelCase )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any]=0 ):
'''simple docstring'''
_A = torch.manual_seed(__UpperCAmelCase )
_A = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionPanoramaPipeline(**__UpperCAmelCase )
_A = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = self.get_dummy_inputs(__UpperCAmelCase )
_A = sd_pipe(**__UpperCAmelCase ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionPanoramaPipeline(**__UpperCAmelCase )
_A = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = self.get_dummy_inputs(__UpperCAmelCase )
_A = "french fries"
_A = sd_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
_A = output.images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionPanoramaPipeline(**__UpperCAmelCase )
_A = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = self.get_dummy_inputs(__UpperCAmelCase )
_A = sd_pipe(**__UpperCAmelCase , view_batch_size=2 )
_A = output.images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" )
_A = StableDiffusionPanoramaPipeline(**__UpperCAmelCase )
_A = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = self.get_dummy_inputs(__UpperCAmelCase )
_A = sd_pipe(**__UpperCAmelCase ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = PNDMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=__UpperCAmelCase )
_A = StableDiffusionPanoramaPipeline(**__UpperCAmelCase )
_A = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = self.get_dummy_inputs(__UpperCAmelCase )
_A = sd_pipe(**__UpperCAmelCase ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any]=0 ):
'''simple docstring'''
_A = torch.manual_seed(__UpperCAmelCase )
_A = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = "stabilityai/stable-diffusion-2-base"
_A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" )
_A = StableDiffusionPanoramaPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_A = self.get_inputs()
_A = pipe(**__UpperCAmelCase ).images
_A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_A = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=__UpperCAmelCase )
_A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_A = self.get_inputs()
_A = pipe(**__UpperCAmelCase ).images
_A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_A = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = 0
def callback_fn(__UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : torch.FloatTensor ) -> None:
_A = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_A = latents[0, -3:, -3:, -1]
_A = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_A = latents[0, -3:, -3:, -1]
_A = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_A = False
_A = "stabilityai/stable-diffusion-2-base"
_A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" )
_A = StableDiffusionPanoramaPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase )
_A = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_A = self.get_inputs()
pipe(**__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = "stabilityai/stable-diffusion-2-base"
_A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" )
_A = StableDiffusionPanoramaPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase )
_A = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = self.get_inputs()
_A = pipe(**__UpperCAmelCase )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 79
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case :List[str] = '''\
Text data.
Second line of data.'''
__snake_case :Optional[Any] = '''file'''
@pytest.fixture(scope='''session''' )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
__a = bytes(_UpperCAmelCase , '''utf-8''' )
with zstd.open(_UpperCAmelCase , '''wb''' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture
def __snake_case ( _UpperCAmelCase ):
with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f:
f.write(_UpperCAmelCase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__a = input_paths[compression_format]
__a = tmp_path / '''cache'''
__a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase )
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
with open(_UpperCAmelCase ) as f:
__a = f.read()
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''custom_cache'''
__a = '''custom_extracted_dir'''
__a = tmp_path / '''custom_extracted_path'''
if default_extracted:
__a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) )
__a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__a = xz_file
__a = (
DownloadConfig(extract_compressed_file=_UpperCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase )
)
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(Path(_UpperCAmelCase ).resolve() )
assert cached_path(_UpperCAmelCase ) == text_file
# relative path
__a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_UpperCAmelCase ) == text_file
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
# relative path
__a = '''./__missing_file__.txt'''
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = get_from_cache(f'tmp://{tmpfs_file}' )
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( ):
with pytest.raises(_UpperCAmelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head('''s3://huggingface.co''' )
| 49
| 0
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case__ : str = TypeVar('''T''')
class snake_case_( Generic[T] ):
__UpperCamelCase = 42 # Cache store of keys
__UpperCamelCase = 42 # References of the keys in cache
__UpperCamelCase = 10 # Maximum capacity of cache
def __init__( self : Union[str, Any] , UpperCamelCase_ : int ):
lowerCAmelCase : List[str] = deque()
lowerCAmelCase : Optional[Any] = set()
if not n:
lowerCAmelCase : Tuple = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
lowerCAmelCase : Union[str, Any] = n
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowerCAmelCase : Tuple = self.dq_store.pop()
self.key_reference.remove(UpperCamelCase_ )
else:
self.dq_store.remove(UpperCamelCase_ )
self.dq_store.appendleft(UpperCamelCase_ )
self.key_reference.add(UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
for k in self.dq_store:
print(UpperCamelCase_ )
def __repr__( self : Union[str, Any] ):
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 314
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class snake_case_:
def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int=sys.maxsize ):
lowerCAmelCase : Tuple = '''bilinear'''
lowerCAmelCase : List[Any] = max_size
lowerCAmelCase : Optional[int] = short_edge_length
def __call__( self : Optional[int] , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : Tuple = []
for img in imgs:
lowerCAmelCase, lowerCAmelCase : List[str] = img.shape[:2]
# later: provide list and randomly choose index for resize
lowerCAmelCase : int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowerCAmelCase : Optional[Any] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ )
if h < w:
lowerCAmelCase, lowerCAmelCase : List[str] = size, scale * w
else:
lowerCAmelCase, lowerCAmelCase : int = scale * h, size
if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size:
lowerCAmelCase : Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Tuple = newh * scale
lowerCAmelCase : str = neww * scale
lowerCAmelCase : Union[str, Any] = int(neww + 0.5 )
lowerCAmelCase : str = int(newh + 0.5 )
if img.dtype == np.uinta:
lowerCAmelCase : Tuple = Image.fromarray(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowerCAmelCase : Union[str, Any] = np.asarray(UpperCamelCase_ )
else:
lowerCAmelCase : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
lowerCAmelCase : Optional[int] = nn.functional.interpolate(
UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 )
img_augs.append(UpperCamelCase_ )
return img_augs
class snake_case_:
def __init__( self : Tuple , UpperCamelCase_ : Any ):
lowerCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowerCAmelCase : List[Any] = cfg.INPUT.FORMAT
lowerCAmelCase : Tuple = cfg.SIZE_DIVISIBILITY
lowerCAmelCase : int = cfg.PAD_VALUE
lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
lowerCAmelCase : Union[str, Any] = cfg.MODEL.DEVICE
lowerCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase : Optional[int] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Dict = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) )
lowerCAmelCase : Dict = [im.shape[-2:] for im in images]
lowerCAmelCase : Dict = [
nn.functional.pad(
UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCamelCase_ , UpperCamelCase_ )
]
return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ )
def __call__( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ):
with torch.no_grad():
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : List[Any] = [images]
if single_image:
assert len(UpperCamelCase_ ) == 1
for i in range(len(UpperCamelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowerCAmelCase : Dict = torch.tensor([im.shape[:2] for im in images] )
lowerCAmelCase : str = self.aug(UpperCamelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowerCAmelCase : int = [self.normalizer(UpperCamelCase_ ) for x in images]
# now pad them to do the following operations
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.pad(UpperCamelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
lowerCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _snake_case ( _snake_case : str , _snake_case : List[Any] ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _snake_case ( _snake_case : Any , _snake_case : Tuple[int, int] ):
assert torch.isfinite(_snake_case ).all(), "Box tensor contains infinite or NaN!"
lowerCAmelCase, lowerCAmelCase : Optional[int] = box_size
tensor[:, 0].clamp_(min=0 , max=_snake_case )
tensor[:, 1].clamp_(min=0 , max=_snake_case )
tensor[:, 2].clamp_(min=0 , max=_snake_case )
tensor[:, 3].clamp_(min=0 , max=_snake_case )
| 314
| 1
|
import math
import unittest
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class lowercase ( unittest.TestCase ):
def A__ ( self):
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(1_1))
self.assertTrue(is_prime(1_3))
self.assertTrue(is_prime(1_7))
self.assertTrue(is_prime(1_9))
self.assertTrue(is_prime(2_3))
self.assertTrue(is_prime(2_9))
def A__ ( self):
with self.assertRaises(A__):
is_prime(-1_9)
self.assertFalse(
is_prime(0) ,'''Zero doesn\'t have any positive factors, primes must have exactly two.''' ,)
self.assertFalse(
is_prime(1) ,'''One only has 1 positive factor, primes must have exactly two.''' ,)
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 101
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowercase__ :str = 2
class lowercase :
def __init__( self ,*, # begin keyword-only arguments
A__="<s>" ,A__="<pad>" ,A__="</s>" ,A__="<unk>" ,A__=None ,):
lowercase , lowercase , lowercase , lowercase = bos, unk, pad, eos
lowercase = []
lowercase = []
lowercase = {}
lowercase = self.add_symbol(A__)
lowercase = self.add_symbol(A__)
lowercase = self.add_symbol(A__)
lowercase = self.add_symbol(A__)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(A__)
lowercase = len(self.symbols)
def __eq__( self ,A__):
return self.indices == other.indices
def __getitem__( self ,A__):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__( self):
return len(self.symbols)
def __contains__( self ,A__):
return sym in self.indices
@classmethod
def A__ ( cls ,A__):
lowercase = cls()
d.add_from_file(A__)
return d
def A__ ( self ,A__ ,A__=1 ,A__=False):
if word in self.indices and not overwrite:
lowercase = self.indices[word]
lowercase = self.count[idx] + n
return idx
else:
lowercase = len(self.symbols)
lowercase = idx
self.symbols.append(A__)
self.count.append(A__)
return idx
def A__ ( self ,A__):
return 0
def A__ ( self ,A__):
if isinstance(A__ ,A__):
try:
with open(A__ ,'''r''' ,encoding='''utf-8''') as fd:
self.add_from_file(A__)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(A__))
return
lowercase = f.readlines()
lowercase = self._load_meta(A__)
for line in lines[indices_start_line:]:
try:
lowercase , lowercase = line.rstrip().rsplit(''' ''' ,1)
if field == "#fairseq:overwrite":
lowercase = True
lowercase , lowercase = line.rsplit(''' ''' ,1)
else:
lowercase = False
lowercase = int(A__)
lowercase = line
if word in self and not overwrite:
raise RuntimeError(
'''Duplicate word found when loading Dictionary: \'{}\'. '''
'''Duplicate words can overwrite earlier ones by adding the '''
'''#fairseq:overwrite flag at the end of the corresponding row '''
'''in the dictionary file. If using the Camembert model, please '''
'''download an updated copy of the model file.'''.format(A__))
self.add_symbol(A__ ,n=A__ ,overwrite=A__)
except ValueError:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''')
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowercase = dict((re.sub(R'''@@$''' , '''''' , lowerCAmelCase__ ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , lowerCAmelCase__ ), v) for k, v in d.items() )
lowercase = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[f'{k}</w>']
lowercase = d[k] # restore
return da
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
# prep
if not os.path.exists(lowerCAmelCase__ ):
raise ValueError(f'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
print(f'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
lowercase = os.path.join(lowerCAmelCase__ , '''checkpoint.pt''' )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f'path to the file {checkpoint_file} does not exist!' )
lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
lowercase = chkpt['''cfg''']['''model''']
# dicts
lowercase = os.path.join(lowerCAmelCase__ , '''dict.txt''' )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f'path to the file {dict_file} does not exist!' )
lowercase = Dictionary.load(lowerCAmelCase__ )
lowercase = rewrite_dict_keys(src_dict.indices )
lowercase = len(lowerCAmelCase__ )
lowercase = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] )
print(f'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# merges_file (bpecodes)
lowercase = os.path.join(lowerCAmelCase__ , '''bpecodes''' )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f'path to the file {bpecodes_file} does not exist!' )
lowercase = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES['''merges_file'''] )
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
# model config
lowercase = os.path.join(lowerCAmelCase__ , '''config.json''' )
lowercase = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1E-12,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# good hparam defaults to start with
print(f'Generating {biogpt_model_config_file}' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# tokenizer config
lowercase = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 1024,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(f'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# model
lowercase = chkpt['''model''']
# remove unneeded keys
lowercase = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('''output_projection.weight''' ):
lowercase = model_state_dict.pop(lowerCAmelCase__ )
else:
lowercase = model_state_dict.pop(lowerCAmelCase__ )
lowercase = BioGptConfig.from_pretrained(lowerCAmelCase__ )
lowercase = BioGptForCausalLM(lowerCAmelCase__ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase__ )
# save
lowercase = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
print('''Conversion is done!''' )
if __name__ == "__main__":
lowercase__ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--biogpt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase__ :Any = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 101
| 1
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_UpperCAmelCase : Union[str, Any] = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_UpperCAmelCase : Tuple = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_UpperCAmelCase : Union[str, Any] = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
def _snake_case (self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def _snake_case (self , __lowercase , __lowercase , __lowercase = CHRF.CHAR_ORDER , __lowercase = CHRF.WORD_ORDER , __lowercase = CHRF.BETA , __lowercase = False , __lowercase = False , __lowercase = False , ):
__lowerCAmelCase = len(references[0] )
if any(len(__lowercase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__lowerCAmelCase = [[refs[i] for refs in references] for i in range(__lowercase )]
__lowerCAmelCase = CHRF(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
__lowerCAmelCase = sb_chrf.corpus_score(__lowercase , __lowercase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 370
|
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Dict = """true"""
def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=1_6):
set_seed(4_2)
__lowerCAmelCase = RegressionModel()
__lowerCAmelCase = deepcopy(lowerCamelCase)
__lowerCAmelCase = RegressionDataset(length=lowerCamelCase)
__lowerCAmelCase = DataLoader(lowerCamelCase, batch_size=lowerCamelCase)
model.to(accelerator.device)
__lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
return model, ddp_model, dataloader
def __magic_name__( lowerCamelCase, lowerCamelCase=False):
__lowerCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
__lowerCAmelCase = load_dataset('''glue''', '''mrpc''', split='''validation''')
def tokenize_function(lowerCamelCase):
__lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase)
return outputs
with accelerator.main_process_first():
__lowerCAmelCase = dataset.map(
lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
__lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''')
def collate_fn(lowerCamelCase):
if use_longest:
return tokenizer.pad(lowerCamelCase, padding='''longest''', return_tensors='''pt''')
return tokenizer.pad(lowerCamelCase, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''')
return DataLoader(lowerCamelCase, shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=1_6)
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = Accelerator(dispatch_batches=lowerCamelCase, split_batches=lowerCamelCase)
__lowerCAmelCase = get_dataloader(lowerCamelCase, not dispatch_batches)
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''', return_dict=lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = []
for batch in dataloader:
__lowerCAmelCase , __lowerCAmelCase = batch.values()
with torch.no_grad():
__lowerCAmelCase = model(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target))
logits_and_targets.append((logit, target))
__lowerCAmelCase , __lowerCAmelCase = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCamelCase)
targs.append(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase)
return logits, targs
def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1_6):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowerCamelCase, lowerCamelCase, lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowerCamelCase, lowerCamelCase, lowerCamelCase)
assert (
len(lowerCamelCase) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase)}"""
def __magic_name__( lowerCamelCase = False, lowerCamelCase = False):
__lowerCAmelCase = evaluate.load('''glue''', '''mrpc''')
__lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowerCamelCase, lowerCamelCase)
# First do baseline
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''no''']
model.to(lowerCamelCase)
model.eval()
for batch in dataloader:
batch.to(lowerCamelCase)
with torch.inference_mode():
__lowerCAmelCase = model(**lowerCamelCase)
__lowerCAmelCase = outputs.logits.argmax(dim=-1)
metric.add_batch(predictions=lowerCamelCase, references=batch['''labels'''])
__lowerCAmelCase = metric.compute()
# Then do distributed
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__lowerCAmelCase = model(**lowerCamelCase)
__lowerCAmelCase = outputs.logits.argmax(dim=-1)
__lowerCAmelCase = batch['''labels''']
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references))
metric.add_batch(predictions=lowerCamelCase, references=lowerCamelCase)
__lowerCAmelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def __magic_name__( ):
__lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
test_mrpc(lowerCamelCase, lowerCamelCase)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
test_torch_metrics(lowerCamelCase, 9_9)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''')
__lowerCAmelCase = Accelerator()
test_torch_metrics(lowerCamelCase, 5_1_2)
accelerator.state._reset_state()
def __magic_name__( lowerCamelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 9
| 0
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
lowercase__ : Dict = 'path-to-your-trained-model'
lowercase__ : List[Any] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('cuda')
lowercase__ : Tuple = 'A photo of sks dog in a bucket'
lowercase__ : List[str] = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
| 324
|
def __magic_name__ ( __a : str ):
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__a ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 244
| 0
|
"""simple docstring"""
from __future__ import annotations
class lowerCAmelCase__ :
def __init__( self : List[Any] , _lowerCamelCase : Dict ):
_snake_case = TypeError(
'''Matrices must be formed from a list of zero or more lists containing at '''
'''least one and the same number of values, each of which must be of type '''
'''int or float.''' )
if len(__lowercase ) != 0:
_snake_case = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(__lowercase ) != cols:
raise error
for value in row:
if not isinstance(__lowercase , (int, float) ):
raise error
_snake_case = rows
else:
_snake_case = []
def lowercase ( self : str ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def lowercase ( self : List[str] ):
return len(self.rows )
@property
def lowercase ( self : int ):
return len(self.rows[0] )
@property
def lowercase ( self : Dict ):
return (self.num_rows, self.num_columns)
@property
def lowercase ( self : int ):
return self.order[0] == self.order[1]
def lowercase ( self : str ):
_snake_case = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__lowercase )
def lowercase ( self : Union[str, Any] ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def lowercase ( self : Optional[Any] ):
return bool(self.determinant() )
def lowercase ( self : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Dict ):
_snake_case = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__lowercase ).determinant()
def lowercase ( self : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] ):
if (row + column) % 2 == 0:
return self.get_minor(__lowercase , __lowercase )
return -1 * self.get_minor(__lowercase , __lowercase )
def lowercase ( self : Any ):
return Matrix(
[
[self.get_minor(__lowercase , __lowercase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowercase ( self : Tuple ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowercase ( self : Optional[Any] ):
_snake_case = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__lowercase )
def lowercase ( self : Optional[Any] ):
_snake_case = self.determinant()
if not determinant:
raise TypeError('''Only matrices with a non-zero determinant have an inverse''' )
return self.adjugate() * (1 / determinant)
def __repr__( self : int ):
return str(self.rows )
def __str__( self : Optional[int] ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'''[''' + '''. '''.join([str(__lowercase ) for value in row] ) + '''.]'''
for row in self.rows
] )
+ "]"
)
def lowercase ( self : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : str = None ):
_snake_case = TypeError('''Row must be a list containing all ints and/or floats''' )
if not isinstance(__lowercase , __lowercase ):
raise type_error
for value in row:
if not isinstance(__lowercase , (int, float) ):
raise type_error
if len(__lowercase ) != self.num_columns:
raise ValueError(
'''Row must be equal in length to the other rows in the matrix''' )
if position is None:
self.rows.append(__lowercase )
else:
_snake_case = self.rows[0:position] + [row] + self.rows[position:]
def lowercase ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : List[str] = None ):
_snake_case = TypeError(
'''Column must be a list containing all ints and/or floats''' )
if not isinstance(__lowercase , __lowercase ):
raise type_error
for value in column:
if not isinstance(__lowercase , (int, float) ):
raise type_error
if len(__lowercase ) != self.num_rows:
raise ValueError(
'''Column must be equal in length to the other columns in the matrix''' )
if position is None:
_snake_case = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
_snake_case = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Union[str, Any] , _lowerCamelCase : Optional[int] ):
if not isinstance(__lowercase , __lowercase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : List[Any] , _lowerCamelCase : List[Any] ):
return not self == other
def __neg__( self : str ):
return self * -1
def __add__( self : Optional[int] , _lowerCamelCase : Dict ):
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Any , _lowerCamelCase : List[str] ):
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : int , _lowerCamelCase : Optional[Any] ):
if isinstance(__lowercase , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__lowercase , __lowercase ):
if self.num_columns != other.num_rows:
raise ValueError(
'''The number of columns in the first matrix must '''
'''be equal to the number of rows in the second''' )
return Matrix(
[
[Matrix.dot_product(__lowercase , __lowercase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'''A Matrix can only be multiplied by an int, float, or another matrix''' )
def __pow__( self : Any , _lowerCamelCase : List[str] ):
if not isinstance(__lowercase , __lowercase ):
raise TypeError('''A Matrix can only be raised to the power of an int''' )
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''' )
_snake_case = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def lowercase ( cls : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any ):
return sum(row[i] * column[i] for i in range(len(__lowercase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
"""simple docstring"""
from __future__ import annotations
class lowerCAmelCase__ :
def __init__( self : Optional[int] , _lowerCamelCase : int = 0 ):
_snake_case = key
def lowercase ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : int ):
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
_snake_case = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_lowerCamelCase ) ^ key ) for ch in content]
def lowercase ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : int ):
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
_snake_case = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_lowerCamelCase ) ^ key ) for ch in content]
def lowercase ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int = 0 ):
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
_snake_case = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_snake_case = ''''''
for ch in content:
ans += chr(ord(_lowerCamelCase ) ^ key )
return ans
def lowercase ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : int = 0 ):
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
_snake_case = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_snake_case = ''''''
for ch in content:
ans += chr(ord(_lowerCamelCase ) ^ key )
return ans
def lowercase ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : int = 0 ):
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
try:
with open(_lowerCamelCase ) as fin, open('''encrypt.out''' , '''w+''' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_lowerCamelCase , _lowerCamelCase ) )
except OSError:
return False
return True
def lowercase ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : int ):
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
try:
with open(_lowerCamelCase ) as fin, open('''decrypt.out''' , '''w+''' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(_lowerCamelCase , _lowerCamelCase ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 40
| 0
|
"""simple docstring"""
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
assert x is not None
assert y is not None
A_ : int = len(_UpperCAmelCase )
A_ : List[Any] = len(_UpperCAmelCase )
# declaring the array for storing the dp values
A_ : int = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
A_ : List[Any] = 1 if x[i - 1] == y[j - 1] else 0
A_ : Dict = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
A_ : str = ''
A_ , A_ : Any = m, n
while i > 0 and j > 0:
A_ : Union[str, Any] = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
A_ : Union[str, Any] = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
lowerCamelCase_ : str = 'AGGTAB'
lowerCamelCase_ : str = 'GXTXAYB'
lowerCamelCase_ : List[str] = 4
lowerCamelCase_ : Tuple = 'GTAB'
lowerCamelCase_ , lowerCamelCase_ : List[str] = longest_common_subsequence(a, b)
print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
| 286
|
"""simple docstring"""
from copy import deepcopy
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ = None , snake_case_ = None ):
"""simple docstring"""
if arr is None and size is not None:
A_ : Union[str, Any] = size
A_ : List[str] = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = len(snake_case_ )
A_ : Optional[int] = deepcopy(snake_case_ )
for i in range(1 , self.size ):
A_ : Optional[Any] = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : int = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
A_ : Optional[int] = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index - (index & (-index))
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
A_ : List[str] = self.next_(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
self.add(snake_case_ , value - self.get(snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if right == 0:
return 0
A_ : Any = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
A_ : Tuple = self.prev(snake_case_ )
return result
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
return self.query(snake_case_ , index + 1 )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
A_ : List[Any] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
A_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 286
| 1
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase_ : Any = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class a__ :
def __init__( self , UpperCAmelCase , UpperCAmelCase=1_6 , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=1_4 , UpperCAmelCase=1_0 , UpperCAmelCase=1_9 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=True , UpperCAmelCase=1_6 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=[1, 2, 3, 4, 5] , UpperCAmelCase=2_5 , UpperCAmelCase=5 , ) -> Any:
__a = d_model
__a = parent
__a = batch_size
__a = prediction_length
__a = context_length
__a = cardinality
__a = num_time_features
__a = lags_sequence
__a = embedding_dimension
__a = is_training
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = context_length
__a = prediction_length + label_length
__a = label_length
__a = moving_average
__a = autocorrelation_factor
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Optional[int]:
__a = config.context_length + max(config.lags_sequence )
__a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__a = floats_tensor([self.batch_size, _past_length] )
__a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__a = floats_tensor([self.batch_size, config.prediction_length] )
__a = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = self.get_config()
__a = self.prepare_autoformer_inputs_dict(_a )
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
__a = AutoformerModel(config=_a ).to(_a ).eval()
__a = model(**_a )
__a = outputs.encoder_last_hidden_state
__a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__a = model.get_encoder()
encoder.save_pretrained(_a )
__a = AutoformerEncoder.from_pretrained(_a ).to(_a )
__a , __a , __a , __a , __a = model.create_network_inputs(**_a )
__a , __a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__a = encoder(inputs_embeds=_a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = model.get_decoder()
decoder.save_pretrained(_a )
__a = AutoformerDecoder.from_pretrained(_a ).to(_a )
__a = decoder(
trend=_a , inputs_embeds=_a , encoder_hidden_states=_a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class a__ ( __lowercase , __lowercase , unittest.TestCase ):
A__ : Tuple = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
A__ : Any = (AutoformerForPrediction,) if is_torch_available() else ()
A__ : Any = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
A__ : Any = False
A__ : List[str] = False
A__ : Dict = False
A__ : int = False
A__ : Union[str, Any] = False
A__ : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = AutoformerModelTester(self )
__a = ConfigTester(self , config_class=_a , has_text_modality=_a )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a , __a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__a = model_class(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
__a , __a = model_class.from_pretrained(_a , output_loading_info=_a )
self.assertEqual(info['missing_keys'] , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_a )
@unittest.skip(reason='Model has no tokens embeddings' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> int:
__a = inspect.signature(getattr(_a , 'forward' ) )
# The main input is the name of the argument after `self`
__a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _a )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(_a )] , _a )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
__a = getattr(self.model_tester , 'seq_length' , _a )
__a = getattr(self.model_tester , 'decoder_seq_length' , _a )
__a = getattr(self.model_tester , 'encoder_seq_length' , _a )
__a = getattr(self.model_tester , 'd_model' , _a )
__a = getattr(self.model_tester , 'num_attention_heads' , _a )
__a = d_model // num_attention_heads
for model_class in self.all_model_classes:
__a = True
__a = False
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.encoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__a = len(_a )
__a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_a , _a )
# decoder attentions
__a = outputs.decoder_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__a = outputs.cross_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + 2 , len(_a ) )
__a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
super().test_retain_grad_hidden_states_attentions()
def lowerCAmelCase( __lowerCamelCase="train-batch.pt" ):
__a = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=__a , repo_type='dataset' )
__a = torch.load(__a , map_location=__a )
return batch
@require_torch
@slow
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_a )
__a = prepare_batch()
with torch.no_grad():
__a = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
__a = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
__a = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_a )
__a = prepare_batch('val-batch.pt' )
with torch.no_grad():
__a = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
__a = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_a )
__a = prepare_batch('val-batch.pt' )
with torch.no_grad():
__a = model.generate(
static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
__a = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , _a )
__a = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=_a )
__a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _a , rtol=1e-1 ) )
| 350
|
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# Return True if there is node that has not iterated.
__a = [False] * len(__lowerCamelCase )
__a = []
queue.append(__lowerCamelCase )
__a = True
while queue:
__a = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowerCamelCase )
__a = True
__a = u
return visited[t]
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# This array is filled by BFS and to store path
__a = [-1] * (len(__lowerCamelCase ))
__a = 0
while bfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = float('Inf' )
__a = sink
while s != source:
# Find the minimum value in select path
__a = min(__lowerCamelCase , graph[parent[s]][s] )
__a = parent[s]
max_flow += path_flow
__a = sink
while v != source:
__a = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__a = parent[v]
return max_flow
lowerCamelCase_ : str = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = 0, 5
print(ford_fulkerson(graph, source, sink))
| 197
| 0
|
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [1]
for i in range(2 , _A ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = list(range(_A ) )
# Find permutation
while factorials:
SCREAMING_SNAKE_CASE__ = factorials.pop()
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = divmod(_A , _A )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
| 1
|
def __UpperCamelCase ( _A : Any ) ->Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =len(_A )
for i in range(length - 1 ):
lowerCamelCase_ =i
for k in range(i + 1 , _A ):
if collection[k] < collection[least]:
lowerCamelCase_ =k
if least != i:
lowerCamelCase_ , lowerCamelCase_ =(collection[i], collection[least])
return collection
if __name__ == "__main__":
__A : str = input('Enter numbers separated by a comma:\n').strip()
__A : Tuple = [int(item) for item in user_input.split(',')]
print(selection_sort(unsorted))
| 49
|
import unittest
from knapsack import greedy_knapsack as kp
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =[10, 20, 30, 40, 50, 60]
lowerCamelCase_ =[2, 4, 6, 8, 10, 12]
lowerCamelCase_ =100
self.assertEqual(kp.calc_profit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 210 )
def _snake_case ( self )-> Any:
self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , """max_weight must greater than zero.""" )
def _snake_case ( self )-> Dict:
self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , """Weight can not be negative.""" )
def _snake_case ( self )-> Dict:
self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , """Profit can not be negative.""" )
def _snake_case ( self )-> Tuple:
self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , """max_weight must greater than zero.""" )
def _snake_case ( self )-> Any:
self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 49
| 1
|
'''simple docstring'''
from math import factorial
def a__ ( a__ = 20 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
__SCREAMING_SNAKE_CASE = n // 2
return int(factorial(lowercase__ ) / (factorial(lowercase__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
UpperCAmelCase : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 267
|
from __future__ import annotations
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = 0.00
__SCREAMING_SNAKE_CASE : List[str] = 0
for resistor in resistors:
if resistor <= 0:
__SCREAMING_SNAKE_CASE : Any = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(lowercase__ )
first_sum += 1 / float(lowercase__ )
index += 1
return 1 / first_sum
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = 0.00
__SCREAMING_SNAKE_CASE : int = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__SCREAMING_SNAKE_CASE : Tuple = F'''Resistor at index {index} has a negative value!'''
raise ValueError(lowercase__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9
| 0
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowerCamelCase : Dict = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[Any]=None , ) -> List[Any]:
if attention_mask is None:
UpperCAmelCase : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase : Any = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __UpperCAmelCase :
def __init__( self : Optional[int], __A : Dict, __A : Union[str, Any]=1_3, __A : Any=7, __A : Union[str, Any]=True, __A : Optional[Any]=False, __A : int=9_9, __A : List[Any]=1_6, __A : Optional[int]=2, __A : Union[str, Any]=4, __A : List[Any]=4, __A : Dict="gelu", __A : int=0.1, __A : Union[str, Any]=0.1, __A : int=3_2, __A : Dict=2, __A : str=1, __A : Union[str, Any]=0, __A : Optional[Any]=0.0_2, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : Union[str, Any] = batch_size
UpperCAmelCase : Dict = seq_length
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : Optional[Any] = use_labels
UpperCAmelCase : List[str] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : Dict = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : Any = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : str = eos_token_id
UpperCAmelCase : int = pad_token_id
UpperCAmelCase : int = bos_token_id
UpperCAmelCase : Optional[Any] = initializer_range
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Dict = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ), 3, self.vocab_size )
UpperCAmelCase : Optional[int] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.intaa )), -1 )
UpperCAmelCase : Optional[Any] = shift_tokens_right(__lowerCAmelCase, 1, 2 )
UpperCAmelCase : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=__lowerCAmelCase, )
UpperCAmelCase : List[Any] = prepare_blenderbot_inputs_dict(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
return config, inputs_dict
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase , UpperCAmelCase : str = self.prepare_config_and_inputs()
return config, inputs_dict
def __magic_name__ ( self : str, __A : Dict, __A : Optional[int], __A : Optional[int] ):
UpperCAmelCase : Dict = 2_0
UpperCAmelCase : Optional[Any] = model_class_name(__lowerCAmelCase )
UpperCAmelCase : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase : List[Any] = model.init_cache(decoder_input_ids.shape[0], __lowerCAmelCase, __lowerCAmelCase )
UpperCAmelCase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='''i4''' )
UpperCAmelCase : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
UpperCAmelCase : Dict = model.decode(
decoder_input_ids[:, :-1], __lowerCAmelCase, decoder_attention_mask=__lowerCAmelCase, past_key_values=__lowerCAmelCase, decoder_position_ids=__lowerCAmelCase, )
UpperCAmelCase : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
UpperCAmelCase : List[str] = model.decode(
decoder_input_ids[:, -1:], __lowerCAmelCase, decoder_attention_mask=__lowerCAmelCase, past_key_values=outputs_cache.past_key_values, decoder_position_ids=__lowerCAmelCase, )
UpperCAmelCase : Tuple = model.decode(__lowerCAmelCase, __lowerCAmelCase )
UpperCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' )
def __magic_name__ ( self : List[Any], __A : Union[str, Any], __A : int, __A : str ):
UpperCAmelCase : List[Any] = 2_0
UpperCAmelCase : List[str] = model_class_name(__lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase , UpperCAmelCase : List[str] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase : List[str] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
], axis=-1, )
UpperCAmelCase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0], __lowerCAmelCase, __lowerCAmelCase )
UpperCAmelCase : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
UpperCAmelCase : str = model.decode(
decoder_input_ids[:, :-1], __lowerCAmelCase, decoder_attention_mask=__lowerCAmelCase, past_key_values=__lowerCAmelCase, decoder_position_ids=__lowerCAmelCase, )
UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
UpperCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, -1:], __lowerCAmelCase, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=__lowerCAmelCase, decoder_position_ids=__lowerCAmelCase, )
UpperCAmelCase : int = model.decode(__lowerCAmelCase, __lowerCAmelCase, decoder_attention_mask=__lowerCAmelCase )
UpperCAmelCase : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' )
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = 9_9
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Any = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
], dtype=np.intaa, )
UpperCAmelCase : Dict = input_ids.shape[0]
UpperCAmelCase : Optional[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size, d_model=2_4, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=3_2, decoder_ffn_dim=3_2, max_position_embeddings=4_8, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self._get_config_and_data()
UpperCAmelCase : Dict = FlaxBlenderbotSmallForConditionalGeneration(__lowerCAmelCase )
UpperCAmelCase : str = lm_model(input_ids=__lowerCAmelCase )
UpperCAmelCase : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape, __lowerCAmelCase )
def __magic_name__ ( self : Dict ):
UpperCAmelCase : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size, d_model=1_4, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=4_8, )
UpperCAmelCase : str = FlaxBlenderbotSmallForConditionalGeneration(__lowerCAmelCase )
UpperCAmelCase : Optional[int] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]], dtype=np.intaa )
UpperCAmelCase : Union[str, Any] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]], dtype=np.intaa )
UpperCAmelCase : Optional[int] = lm_model(input_ids=__lowerCAmelCase, decoder_input_ids=__lowerCAmelCase )
UpperCAmelCase : Optional[int] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape, __lowerCAmelCase )
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : int = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]], dtype=np.intaa )
UpperCAmelCase : Optional[int] = shift_tokens_right(__lowerCAmelCase, 1, 2 )
UpperCAmelCase : Union[str, Any] = np.equal(__lowerCAmelCase, 1 ).astype(np.floataa ).sum()
UpperCAmelCase : List[str] = np.equal(__lowerCAmelCase, 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape, input_ids.shape )
self.assertEqual(__lowerCAmelCase, n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0], 2 ).all() )
@require_flax
class __UpperCAmelCase ( _a , unittest.TestCase , _a ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
UpperCamelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Tuple = FlaxBlenderbotSmallModelTester(self )
def __magic_name__ ( self : Dict ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCAmelCase, __lowerCAmelCase )
UpperCAmelCase : str = model_class(__lowerCAmelCase )
@jax.jit
def encode_jitted(__A : Dict, __A : Union[str, Any]=None, **__A : Any ):
return model.encode(input_ids=__lowerCAmelCase, attention_mask=__lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase : Any = encode_jitted(**__lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase : Tuple = encode_jitted(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ), len(__lowerCAmelCase ) )
for jitted_output, output in zip(__lowerCAmelCase, __lowerCAmelCase ):
self.assertEqual(jitted_output.shape, output.shape )
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase : Tuple = model_class(__lowerCAmelCase )
UpperCAmelCase : List[str] = model.encode(inputs_dict['''input_ids'''], inputs_dict['''attention_mask'''] )
UpperCAmelCase : str = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__A : Optional[Any], __A : Dict, __A : Union[str, Any] ):
return model.decode(
decoder_input_ids=__lowerCAmelCase, decoder_attention_mask=__lowerCAmelCase, encoder_outputs=__lowerCAmelCase, )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase : str = decode_jitted(**__lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase : int = decode_jitted(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ), len(__lowerCAmelCase ) )
for jitted_output, output in zip(__lowerCAmelCase, __lowerCAmelCase ):
self.assertEqual(jitted_output.shape, output.shape )
@slow
def __magic_name__ ( self : Any ):
for model_class_name in self.all_model_classes:
UpperCAmelCase : List[str] = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase : Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase : List[str] = model(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
| 371
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowerCamelCase : Optional[Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def a__ ( ) -> List[Any]:
UpperCAmelCase : Dict = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase : List[Any] = get_sagemaker_input()
else:
UpperCAmelCase : Optional[Any] = get_cluster_input()
return config
def a__ ( UpperCAmelCase : Union[str, Any]=None ) -> List[Any]:
if subparsers is not None:
UpperCAmelCase : Optional[Any] = subparsers.add_parser('''config''' , description=UpperCAmelCase )
else:
UpperCAmelCase : List[str] = argparse.ArgumentParser('''Accelerate config command''' , description=UpperCAmelCase )
parser.add_argument(
'''--config_file''' , default=UpperCAmelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
UpperCAmelCase : str = get_user_input()
if args.config_file is not None:
UpperCAmelCase : Any = args.config_file
else:
if not os.path.isdir(UpperCAmelCase ):
os.makedirs(UpperCAmelCase )
UpperCAmelCase : List[str] = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(UpperCAmelCase )
else:
config.to_yaml_file(UpperCAmelCase )
print(f'''accelerate configuration saved at {config_file}''' )
def a__ ( ) -> Dict:
UpperCAmelCase : str = config_command_parser()
UpperCAmelCase : str = parser.parse_args()
config_command(UpperCAmelCase )
if __name__ == "__main__":
main()
| 99
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '''▁'''
lowerCAmelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
lowerCAmelCase_ = {
'''facebook/xglm-564M''': 20_48,
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : int="<s>" , _UpperCamelCase : int="</s>" , _UpperCamelCase : Optional[int]="</s>" , _UpperCamelCase : Tuple="<s>" , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<pad>" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Optional[Any] , ) ->None:
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case_ = 7
snake_case_ = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
snake_case_ = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
snake_case_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case_ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
snake_case_ = len(self.sp_model )
snake_case_ = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_UpperCamelCase )
snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Union[str, Any] ) ->str:
snake_case_ = self.__dict__.copy()
snake_case_ = None
snake_case_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Any , _UpperCamelCase : Union[str, Any] ) ->Optional[Any]:
snake_case_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case__( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case_ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase ))
return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase ))
def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def snake_case__( self : int ) ->Union[str, Any]:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case__( self : Any ) ->int:
snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case__( self : int , _UpperCamelCase : str ) ->List[str]:
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[Any] ) ->Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case__( self : Dict , _UpperCamelCase : List[str] ) ->int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case__( self : Tuple , _UpperCamelCase : int ) ->str:
snake_case_ = ''''''.join(_UpperCamelCase ).replace(_UpperCamelCase , ''' ''' ).strip()
return out_string
def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
| 8
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : str = LayoutLMTokenizer
UpperCAmelCase : int = LayoutLMTokenizerFast
UpperCAmelCase : Union[str, Any] = True
UpperCAmelCase : Optional[Any] = True
def __snake_case ( self : Optional[int]):
super().setUp()
a : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str):
a : Tuple = "UNwant\u00E9d,running"
a : Dict = "unwanted, running"
return input_text, output_text
def __snake_case ( self : Any):
a : List[Any] = self.tokenizer_class(self.vocab_file)
a : str = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9])
def __snake_case ( self : Dict):
pass
| 40
| 0
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : str = prime_factors(__lowerCamelCase )
if is_square_free(__lowerCamelCase ):
return -1 if len(__lowerCamelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
_a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ):
UpperCAmelCase_ : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : Tuple = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
if latents is None:
UpperCAmelCase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase_ : str = latents.to(lowercase_ )
UpperCAmelCase_ : Dict = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
UpperCAmelCase_ : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase_ : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : str = self._execution_device
UpperCAmelCase_ : List[Any] = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase_ : List[Any] = self.scheduler.timesteps
UpperCAmelCase_ : List[str] = self.unet.config.in_channels
UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase_ : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds}
UpperCAmelCase_ : Optional[Any] = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 )
UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5
UpperCAmelCase_ : int = image.clamp(0 , 1 )
UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 23
| 1
|
from __future__ import annotations
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple ): # noqa: E741
"""simple docstring"""
while r - l > 1:
SCREAMING_SNAKE_CASE_ : str = (l + r) // 2
if v[m] >= key:
SCREAMING_SNAKE_CASE_ : Optional[Any] = m
else:
SCREAMING_SNAKE_CASE_ : Tuple = m # noqa: E741
return r
def _snake_case ( lowerCAmelCase : list[int] ):
"""simple docstring"""
if len(lowerCAmelCase ) == 0:
return 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [0] * len(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = v[0]
for i in range(1 , len(lowerCAmelCase ) ):
if v[i] < tail[0]:
SCREAMING_SNAKE_CASE_ : int = v[i]
elif v[i] > tail[length - 1]:
SCREAMING_SNAKE_CASE_ : str = v[i]
length += 1
else:
SCREAMING_SNAKE_CASE_ : Tuple = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] ={
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class _A ( lowerCAmelCase ):
snake_case__ : Union[str, Any] = 'open-llama'
def __init__( self , __lowerCAmelCase=10_0000 , __lowerCAmelCase=4096 , __lowerCAmelCase=1_1008 , __lowerCAmelCase=32 , __lowerCAmelCase=32 , __lowerCAmelCase="silu" , __lowerCAmelCase=2048 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-6 , __lowerCAmelCase=True , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = hidden_size
lowercase = intermediate_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = initializer_range
lowercase = rms_norm_eps
lowercase = use_cache
lowercase = kwargs.pop(
"""use_memorry_efficient_attention""" , __lowerCAmelCase )
lowercase = hidden_dropout_prob
lowercase = attention_dropout_prob
lowercase = use_stable_embedding
lowercase = shared_input_output_embedding
lowercase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , tie_word_embeddings=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowerCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'got {self.rope_scaling}' )
lowercase = self.rope_scaling.get("""type""" , __lowerCAmelCase )
lowercase = self.rope_scaling.get("""factor""" , __lowerCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
| 197
| 0
|
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
__A = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__A = BASE_URL + '/user'
# https://github.com/settings/tokens
__A = os.environ.get('USER_TOKEN', '')
def _lowerCamelCase(__UpperCamelCase ) -> dict[Any, Any]:
_lowerCAmelCase ={
"""Authorization""": F'''token {auth_token}''',
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 341
|
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =d_model
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =use_cache
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =None
_lowerCAmelCase =decoder_seq_length
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
_lowerCAmelCase =True
_lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_lowerCAmelCase =input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_lowerCAmelCase =outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""]
_lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""]
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase = True
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> Any:
pass
def _lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ) -> str:
pass
| 341
| 1
|
from __future__ import annotations
import pandas as pd
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = [0] * no_of_processes
__a = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_UpperCAmelCase ):
__a = burst_time[i]
__a = 0
__a = 0
__a = 999999999
__a = 0
__a = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_UpperCAmelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__a = remaining_time[j]
__a = j
__a = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__a = remaining_time[short]
if minm == 0:
__a = 999999999
if remaining_time[short] == 0:
complete += 1
__a = False
# Find finish time of current process
__a = increment_time + 1
# Calculate waiting time
__a = finish_time - arrival_time[short]
__a = finar - burst_time[short]
if waiting_time[short] < 0:
__a = 0
# Increment time
increment_time += 1
return waiting_time
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = [0] * no_of_processes
for i in range(_UpperCAmelCase ):
__a = burst_time[i] + waiting_time[i]
return turn_around_time
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = 0
__a = 0
for i in range(_UpperCAmelCase ):
__a = total_waiting_time + waiting_time[i]
__a = total_turn_around_time + turn_around_time[i]
print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
__snake_case :List[Any] = int(input())
__snake_case :Any = [0] * no_of_processes
__snake_case :List[Any] = [0] * no_of_processes
__snake_case :List[str] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
__snake_case ,__snake_case :Union[str, Any] = map(int, input().split())
__snake_case :List[str] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case :Union[str, Any] = burst_time
__snake_case :Union[str, Any] = no_of_processes
__snake_case :List[str] = waiting_time
__snake_case :Dict = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__snake_case :List[str] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 49
|
from __future__ import annotations
from typing import Any
def __snake_case ( _UpperCAmelCase ):
if not postfix_notation:
return 0
__a = {'''+''', '''-''', '''*''', '''/'''}
__a = []
for token in postfix_notation:
if token in operations:
__a , __a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_UpperCAmelCase ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def _A ( _lowerCAmelCase , _lowerCAmelCase=1_000 ):
"""simple docstring"""
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
__lowercase =n - 1
__lowercase =0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
__lowercase =0
while count < prec:
__lowercase =random.randint(2 , n - 1 )
__lowercase =bin_exp_mod(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if b != 1:
__lowercase =True
for _ in range(_lowerCAmelCase ):
if b == n - 1:
__lowercase =False
break
__lowercase =b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
lowerCamelCase = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 48
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =SwinConfig(image_size=192 )
if "base" in model_name:
__lowercase =6
__lowercase =128
__lowercase =(2, 2, 18, 2)
__lowercase =(4, 8, 16, 32)
elif "large" in model_name:
__lowercase =12
__lowercase =192
__lowercase =(2, 2, 18, 2)
__lowercase =(6, 12, 24, 48)
else:
raise ValueError('Model not supported, only supports base and large variants' )
__lowercase =window_size
__lowercase =embed_dim
__lowercase =depths
__lowercase =num_heads
return config
def _A ( _lowerCAmelCase ):
"""simple docstring"""
if "encoder.mask_token" in name:
__lowercase =name.replace('encoder.mask_token' , 'embeddings.mask_token' )
if "encoder.patch_embed.proj" in name:
__lowercase =name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "encoder.patch_embed.norm" in name:
__lowercase =name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
if "attn.proj" in name:
__lowercase =name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__lowercase =name.replace('attn' , 'attention.self' )
if "norm1" in name:
__lowercase =name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__lowercase =name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__lowercase =name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__lowercase =name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
__lowercase ='layernorm.weight'
if name == "encoder.norm.bias":
__lowercase ='layernorm.bias'
if "decoder" in name:
pass
else:
__lowercase ='swin.' + name
return name
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__lowercase =orig_state_dict.pop(_lowerCAmelCase )
if "attn_mask" in key:
pass
elif "qkv" in key:
__lowercase =key.split('.' )
__lowercase =int(key_split[2] )
__lowercase =int(key_split[4] )
__lowercase =model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase =val[:dim, :]
__lowercase =val[
dim : dim * 2, :
]
__lowercase =val[-dim:, :]
else:
__lowercase =val[
:dim
]
__lowercase =val[
dim : dim * 2
]
__lowercase =val[
-dim:
]
else:
__lowercase =val
return orig_state_dict
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =torch.load(_lowerCAmelCase , map_location='cpu' )['model']
__lowercase =get_swin_config(_lowerCAmelCase )
__lowercase =SwinForMaskedImageModeling(_lowerCAmelCase )
model.eval()
__lowercase =convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
__lowercase ='http://images.cocodataset.org/val2017/000000039769.jpg'
__lowercase =ViTImageProcessor(size={'height': 192, 'width': 192} )
__lowercase =Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
__lowercase =image_processor(images=_lowerCAmelCase , return_tensors='pt' )
with torch.no_grad():
__lowercase =model(**_lowerCAmelCase ).logits
print(outputs.keys() )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print(f"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(f"""microsoft/{model_name}""" )
image_processor.push_to_hub(f"""microsoft/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 48
| 1
|
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _snake_case ( lowercase_ ):
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
with self.assertRaises(a__ ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
with self.assertRaises(a__ ):
snake_case_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case_ = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case_ = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
import PIL.Image
snake_case_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a__ ) as mock_cast_to_python_objects:
snake_case_ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image() ) )
snake_case_ , snake_case_ = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a__ )
self.assertFalse(kwargs["optimize_list_casting"] )
def UpperCamelCase_( snake_case : Any , snake_case : int ):
'''simple docstring'''
snake_case_ = pa.BufferReader(snake_case ) if isinstance(snake_case , pa.Buffer ) else pa.memory_map(snake_case )
snake_case_ = pa.ipc.open_stream(snake_case )
snake_case_ = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def UpperCamelCase_( snake_case : Dict , snake_case : Dict ):
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(snake_case ) if fields else None
with ArrowWriter(stream=snake_case , schema=snake_case , writer_batch_size=snake_case ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(snake_case , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
snake_case_ = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=snake_case , features=snake_case ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
snake_case_ = pa.BufferReader(output.getvalue() )
snake_case_ = pa.ipc.open_stream(snake_case )
snake_case_ = f.read_all()
snake_case_ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(snake_case )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
def UpperCamelCase_( snake_case : Optional[Any] ):
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case , writer_batch_size=snake_case , hash_salt="split_name" , check_duplicates=snake_case , ) as writer:
with pytest.raises(snake_case ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
snake_case_ , snake_case_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 1_0] )
def UpperCamelCase_( snake_case : Dict ):
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case , writer_batch_size=snake_case , hash_salt="split_name" , check_duplicates=snake_case , ) as writer:
with pytest.raises(snake_case ):
writer.write({"col_1": "foo", "col_2": 1} , key=1_0 )
writer.write({"col_1": "bar", "col_2": 2} , key=1_0 )
snake_case_ , snake_case_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 1_0] )
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case , writer_batch_size=snake_case , hash_salt="split_name" , check_duplicates=snake_case , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def UpperCamelCase_( snake_case : Union[str, Any] , snake_case : List[str] ):
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(snake_case ) if fields else None
with ArrowWriter(stream=snake_case , schema=snake_case , writer_batch_size=snake_case ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(snake_case , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def UpperCamelCase_( snake_case : str , snake_case : Dict ):
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(snake_case ) if fields else None
with ArrowWriter(stream=snake_case , schema=snake_case , writer_batch_size=snake_case ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(snake_case , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def UpperCamelCase_( snake_case : Optional[Any] , snake_case : Optional[int] ):
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
snake_case_ = pa.schema(snake_case ) if fields else None
with ArrowWriter(stream=snake_case , schema=snake_case , writer_batch_size=snake_case ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(snake_case , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def UpperCamelCase_( ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = {"col_1": pa.string(), "col_2": pa.intaa()}
snake_case_ = os.path.join(snake_case , "test.arrow" )
with ArrowWriter(path=snake_case , schema=pa.schema(snake_case ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(snake_case , metadata=writer._schema.metadata )
_check_output(snake_case , 1 )
def UpperCamelCase_( snake_case : Tuple ):
'''simple docstring'''
if pa.types.is_list(snake_case ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def UpperCamelCase_( snake_case : Tuple , snake_case : Dict ):
'''simple docstring'''
if isinstance(lst[0] , snake_case ):
change_first_primitive_element_in_list(lst[0] , snake_case )
else:
snake_case_ = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def UpperCamelCase_( snake_case : List[Any] , snake_case : Dict , snake_case : int ):
'''simple docstring'''
snake_case_ = pa.array(TypedSequence(snake_case , optimized_int_type=snake_case ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Any , snake_case : Dict ):
'''simple docstring'''
snake_case_ = pa.array(OptimizedTypedSequence(snake_case , col=snake_case ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
snake_case_ = copy.deepcopy(snake_case )
snake_case_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(snake_case , snake_case )
snake_case_ = pa.array(OptimizedTypedSequence(snake_case , col=snake_case ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def UpperCamelCase_( snake_case : List[str] , snake_case : Union[str, Any] ):
'''simple docstring'''
snake_case_ = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=snake_case ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def UpperCamelCase_( snake_case : Union[str, Any] ):
'''simple docstring'''
snake_case_ = "mock://dataset-train.arrow"
with ArrowWriter(path=snake_case , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(snake_case ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(snake_case )
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = pa.BufferOutputStream()
with ParquetWriter(stream=snake_case ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
snake_case_ , snake_case_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
snake_case_ = pa.BufferReader(output.getvalue() )
snake_case_ = pq.read_table(snake_case )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def UpperCamelCase_( snake_case : Optional[int] , snake_case : List[str] ):
'''simple docstring'''
import PIL.Image
snake_case_ = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(snake_case , format="png" )
snake_case_ = pa.BufferOutputStream()
with ParquetWriter(
stream=snake_case , features=Features({"image": Image()} ) , embed_local_files=snake_case ) as writer:
writer.write({"image": image_path} )
writer.finalize()
snake_case_ = pa.BufferReader(output.getvalue() )
snake_case_ = pq.read_table(snake_case )
snake_case_ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , snake_case )
with open(snake_case , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = pa.schema([pa.field("col_1" , pa.string() , nullable=snake_case )] )
snake_case_ = pa.BufferOutputStream()
with ArrowWriter(stream=snake_case ) as writer:
writer._build_writer(inferred_schema=snake_case )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 85
|
from math import loga
def A_ ( A__ ) -> int:
if a < 0:
raise ValueError('Input value must be a positive integer' )
elif isinstance(A__ , A__ ):
raise TypeError('Input value must be a \'int\' type' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99
| 0
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
if not is_accelerate_available():
return method
__lowerCamelCase : Tuple = version.parse(accelerate.__version__ ).base_version
if version.parse(__lowerCamelCase ) < version.parse('0.17.0' ):
return method
def wrapper(self , *lowerCamelCase__ , **lowerCamelCase__ ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *__lowerCamelCase , **__lowerCamelCase )
return wrapper
| 368
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a ={
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 113
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCamelCase__: Union[str, Any] = None
UpperCamelCase__: List[Any] = logging.get_logger(__name__)
UpperCamelCase__: Dict = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase__: int = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
UpperCamelCase__: List[str] = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
UpperCamelCase__: Optional[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = ["""input_ids""", """attention_mask"""]
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = []
lowerCamelCase__ = []
def __init__( self : int , __snake_case : List[Any]=None , __snake_case : Optional[Any]=None , __snake_case : Optional[int]="<s>" , __snake_case : List[Any]="</s>" , __snake_case : Optional[int]="</s>" , __snake_case : List[str]="<s>" , __snake_case : List[str]="<unk>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : Optional[Any]="<mask>" , __snake_case : Tuple=None , __snake_case : List[str]=None , __snake_case : Any=None , **__snake_case : Tuple , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Optional[int] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
vocab_file=__snake_case , tokenizer_file=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCAmelCase : Tuple = vocab_file
UpperCAmelCase : Union[str, Any] = False if not self.vocab_file else True
UpperCAmelCase : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
UpperCAmelCase : int = {
lang_code: self.convert_tokens_to_ids(__snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase : Dict = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase : Optional[Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A ( self : Tuple ) -> str:
return self._src_lang
@src_lang.setter
def A ( self : List[Any] , __snake_case : str ) -> None:
UpperCAmelCase : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A ( self : Tuple , __snake_case : Any , __snake_case : str , __snake_case : Optional[str] , __snake_case : Optional[str] , **__snake_case : Any ) -> Tuple:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase : List[Any] = src_lang
UpperCAmelCase : Union[str, Any] = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
UpperCAmelCase : str = self.convert_tokens_to_ids(__snake_case )
UpperCAmelCase : Union[str, Any] = tgt_lang_id
return inputs
def A ( self : str , __snake_case : List[str] , __snake_case : str = "en_XX" , __snake_case : Optional[List[str]] = None , __snake_case : str = "ro_RO" , **__snake_case : List[Any] , ) -> BatchEncoding:
UpperCAmelCase : Dict = src_lang
UpperCAmelCase : List[str] = tgt_lang
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def A ( self : Any ) -> str:
return self.set_src_lang_special_tokens(self.src_lang )
def A ( self : Optional[int] ) -> Optional[int]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A ( self : Any , __snake_case : str ) -> None:
UpperCAmelCase : Optional[int] = self.convert_tokens_to_ids(__snake_case )
UpperCAmelCase : Dict = []
UpperCAmelCase : Optional[int] = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase : str = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A ( self : Dict , __snake_case : str ) -> None:
UpperCAmelCase : Optional[Any] = self.convert_tokens_to_ids(__snake_case )
UpperCAmelCase : Dict = []
UpperCAmelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase : Any = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def A ( self : Any , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCAmelCase : List[str] = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
| 23
|
'''simple docstring'''
from math import isclose, sqrt
def snake_case_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ) -> tuple[float, float, float]:
UpperCAmelCase : Optional[int] = point_y / 4 / point_x
UpperCAmelCase : str = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
UpperCAmelCase : Any = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
UpperCAmelCase : Union[str, Any] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
UpperCAmelCase : Union[str, Any] = outgoing_gradient**2 + 4
UpperCAmelCase : Dict = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
UpperCAmelCase : List[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100
UpperCAmelCase : List[str] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
UpperCAmelCase : Optional[int] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
UpperCAmelCase : Optional[Any] = x_minus if isclose(_lowerCAmelCase , _lowerCAmelCase ) else x_plus
UpperCAmelCase : Union[str, Any] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def snake_case_ ( _lowerCAmelCase : float = 1.4 , _lowerCAmelCase : float = -9.6 ) -> int:
UpperCAmelCase : int = 0
UpperCAmelCase : float = first_x_coord
UpperCAmelCase : float = first_y_coord
UpperCAmelCase : float = (1_0.1 - point_y) / (0.0 - point_x)
while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = next_point(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"{solution() = }")
| 23
| 1
|
from ...processing_utils import ProcessorMixin
class a_ ( a_ ):
'''simple docstring'''
__a: Any = '''SpeechT5FeatureExtractor'''
__a: Tuple = '''SpeechT5Tokenizer'''
def __init__( self , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
super().__init__(lowercase_ , lowercase_ )
def __call__( self , *lowercase_ , **lowercase_ ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = kwargs.pop('audio' , lowercase_ )
lowerCAmelCase_ = kwargs.pop('text' , lowercase_ )
lowerCAmelCase_ = kwargs.pop('text_target' , lowercase_ )
lowerCAmelCase_ = kwargs.pop('audio_target' , lowercase_ )
lowerCAmelCase_ = kwargs.pop('sampling_rate' , lowercase_ )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
lowerCAmelCase_ = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ )
elif text is not None:
lowerCAmelCase_ = self.tokenizer(lowercase_ , **lowercase_ )
else:
lowerCAmelCase_ = None
if audio_target is not None:
lowerCAmelCase_ = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ )
lowerCAmelCase_ = targets['input_values']
elif text_target is not None:
lowerCAmelCase_ = self.tokenizer(lowercase_ , **lowercase_ )
lowerCAmelCase_ = targets['input_ids']
else:
lowerCAmelCase_ = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase_ = labels
lowerCAmelCase_ = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase_ = decoder_attention_mask
return inputs
def _lowercase ( self , *lowercase_ , **lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = kwargs.pop('input_values' , lowercase_ )
lowerCAmelCase_ = kwargs.pop('input_ids' , lowercase_ )
lowerCAmelCase_ = kwargs.pop('labels' , lowercase_ )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
lowerCAmelCase_ = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_ )
elif input_ids is not None:
lowerCAmelCase_ = self.tokenizer.pad(lowercase_ , **lowercase_ )
else:
lowerCAmelCase_ = None
if labels is not None:
if "input_ids" in labels or (isinstance(lowercase_ , lowercase_ ) and "input_ids" in labels[0]):
lowerCAmelCase_ = self.tokenizer.pad(lowercase_ , **lowercase_ )
lowerCAmelCase_ = targets['input_ids']
else:
lowerCAmelCase_ = self.feature_extractor.feature_size
lowerCAmelCase_ = self.feature_extractor.num_mel_bins
lowerCAmelCase_ = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_ )
lowerCAmelCase_ = feature_size_hack
lowerCAmelCase_ = targets['input_values']
else:
lowerCAmelCase_ = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase_ = labels
lowerCAmelCase_ = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase_ = decoder_attention_mask
return inputs
def _lowercase ( self , *lowercase_ , **lowercase_ ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def _lowercase ( self , *lowercase_ , **lowercase_ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
| 351
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase_ = logging.get_logger(__name__)
def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> Tuple[int, int]:
def constraint_to_multiple_of(a_ , a_ , a_=0 , a_=None ):
lowerCAmelCase_ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowerCAmelCase_ = math.floor(val / multiple ) * multiple
if x < min_val:
lowerCAmelCase_ = math.ceil(val / multiple ) * multiple
return x
lowerCAmelCase_ = (output_size, output_size) if isinstance(a_ , a_ ) else output_size
lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = output_size
# determine new height and width
lowerCAmelCase_ = output_height / input_height
lowerCAmelCase_ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowerCAmelCase_ = scale_width
else:
# fit height
lowerCAmelCase_ = scale_height
lowerCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=a_ )
lowerCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=a_ )
return (new_height, new_width)
class a_ ( a_ ):
'''simple docstring'''
__a: Union[str, Any] = ['''pixel_values''']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = False , lowercase_ = 1 , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of
lowerCAmelCase_ = resample
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = False , lowercase_ = 1 , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowerCAmelCase_ = get_resize_output_image_size(
lowercase_ , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase_ , multiple=lowercase_ , )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Dict:
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image:
'''simple docstring'''
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = size if size is not None else self.size
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowerCAmelCase_ = resample if resample is not None else self.resample
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ = image_std if image_std is not None else self.image_std
lowerCAmelCase_ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowerCAmelCase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
lowerCAmelCase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowerCAmelCase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowerCAmelCase_ = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowercase_ ):
lowerCAmelCase_ = target_sizes.numpy()
lowerCAmelCase_ = []
for idx in range(len(lowercase_ ) ):
lowerCAmelCase_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_ )
lowerCAmelCase_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase_ )
else:
lowerCAmelCase_ = logits.argmax(dim=1 )
lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 14
| 0
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__lowerCAmelCase = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowerCAmelCase = BASE_URL + '/user'
# https://github.com/settings/tokens
__lowerCAmelCase = os.environ.get('USER_TOKEN', '')
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = {
"""Authorization""": f"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 341
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[2, 2, 3, 2] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=10 , UpperCAmelCase=0.02 , UpperCAmelCase=["stage2", "stage3", "stage4"] , UpperCAmelCase=3 , UpperCAmelCase=None , ) -> List[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = num_stages
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = is_training
_snake_case = use_labels
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = out_features
_snake_case = num_labels
_snake_case = scope
_snake_case = num_stages
def lowercase (self ) -> List[Any]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowercase (self ) -> Tuple:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def lowercase (self ) -> Any:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCAmelCase , loss_ignore_index=255 , num_labels=self.num_labels , )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
_snake_case = UperNetForSemanticSegmentation(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_snake_case = model(UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowercase (self ) -> Tuple:
_snake_case = self.prepare_config_and_inputs()
(
(
_snake_case
), (
_snake_case
), (
_snake_case
),
) = config_and_inputs
_snake_case = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowerCAmelCase_ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowercase (self ) -> Optional[Any]:
_snake_case = UperNetModelTester(self )
_snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def lowercase (self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase (self ) -> Union[str, Any]:
return
def lowercase (self ) -> Union[str, Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def lowercase (self ) -> int:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def lowercase (self ) -> int:
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def lowercase (self ) -> List[str]:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def lowercase (self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def lowercase (self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowercase (self ) -> str:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase (self ) -> int:
pass
def lowercase (self ) -> List[str]:
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def lowercase (self ) -> List[str]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = _config_zero_init(UpperCAmelCase )
_snake_case = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_snake_case = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def lowercase (self ) -> Optional[Any]:
pass
@slow
def lowercase (self ) -> Tuple:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = UperNetForSemanticSegmentation.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( ):
_snake_case = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_snake_case = Image.open(_SCREAMING_SNAKE_CASE ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase (self ) -> Any:
_snake_case = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(UpperCAmelCase )
_snake_case = prepare_img()
_snake_case = processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
_snake_case = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_snake_case = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
def lowercase (self ) -> Any:
_snake_case = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(UpperCAmelCase )
_snake_case = prepare_img()
_snake_case = processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
_snake_case = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_snake_case = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
| 341
| 1
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
UpperCamelCase__ = 0
UpperCamelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCamelCase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCamelCase__ = tuple[int, int]
class lowerCamelCase_ :
def __init__( self : int , _A : int , _A : int , _A : int , _A : int , _A : int , _A : Node | None , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = pos_x
UpperCAmelCase__ : Dict = pos_y
UpperCAmelCase__ : List[str] = (pos_y, pos_x)
UpperCAmelCase__ : List[str] = goal_x
UpperCAmelCase__ : List[Any] = goal_y
UpperCAmelCase__ : int = g_cost
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = self.calculate_heuristic()
UpperCAmelCase__ : str = self.g_cost + self.h_cost
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.pos_x - self.goal_x
UpperCAmelCase__ : Any = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_A ) + abs(_A )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : str , _A : Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class lowerCamelCase_ :
def __init__( self : Dict , _A : TPosition , _A : TPosition ):
'''simple docstring'''
UpperCAmelCase__ : str = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _A )
UpperCAmelCase__ : Optional[int] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , _A )
UpperCAmelCase__ : str = [self.start]
UpperCAmelCase__ : list[Node] = []
UpperCAmelCase__ : Any = False
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase__ : Union[str, Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_A )
self.closed_nodes.append(_A )
UpperCAmelCase__ : Dict = self.get_successors(_A )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_A )
else:
# retrieve the best current path
UpperCAmelCase__ : str = self.open_nodes.pop(self.open_nodes.index(_A ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_A )
else:
self.open_nodes.append(_A )
return [self.start.pos]
def lowercase_ ( self : Union[str, Any] , _A : Node ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
for action in delta:
UpperCAmelCase__ : int = parent.pos_x + action[1]
UpperCAmelCase__ : Tuple = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_A , _A , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _A , ) )
return successors
def lowercase_ ( self : str , _A : Node | None ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = node
UpperCAmelCase__ : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase__ : str = current_node.parent
path.reverse()
return path
class lowerCamelCase_ :
def __init__( self : Dict , _A : TPosition , _A : TPosition ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = AStar(_A , _A )
UpperCAmelCase__ : List[Any] = AStar(_A , _A )
UpperCAmelCase__ : int = False
def lowercase_ ( self : int ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase__ : str = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase__ : int = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_A , _A )
self.fwd_astar.closed_nodes.append(_A )
self.bwd_astar.closed_nodes.append(_A )
UpperCAmelCase__ : List[Any] = current_bwd_node
UpperCAmelCase__ : Any = current_fwd_node
UpperCAmelCase__ : Any = {
self.fwd_astar: self.fwd_astar.get_successors(_A ),
self.bwd_astar: self.bwd_astar.get_successors(_A ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_A )
else:
# retrieve the best current path
UpperCAmelCase__ : List[str] = astar.open_nodes.pop(
astar.open_nodes.index(_A ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_A )
else:
astar.open_nodes.append(_A )
return [self.fwd_astar.start.pos]
def lowercase_ ( self : Any , _A : Node , _A : Node ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.fwd_astar.retrace_path(_A )
UpperCAmelCase__ : List[Any] = self.bwd_astar.retrace_path(_A )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase__ : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCamelCase__ = (0, 0)
UpperCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase__ = time.time()
UpperCamelCase__ = AStar(init, goal)
UpperCamelCase__ = a_star.search()
UpperCamelCase__ = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
UpperCamelCase__ = time.time()
UpperCamelCase__ = BidirectionalAStar(init, goal)
UpperCamelCase__ = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 299
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class lowerCamelCase_ :
def __init__( self : List[Any] , _A : int | None = None ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = value
UpperCAmelCase__ : Node | None = None # Added in order to delete a node easier
UpperCAmelCase__ : Node | None = None
UpperCAmelCase__ : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class lowerCamelCase_ :
def __init__( self : Optional[Any] , _A : Node | None = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = root
def __str__( self : Union[str, Any] ):
'''simple docstring'''
return str(self.root )
def lowercase_ ( self : str , _A : Node , _A : Node | None ):
'''simple docstring'''
if new_children is not None: # reset its kids
UpperCAmelCase__ : Dict = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_A ): # If it is the right children
UpperCAmelCase__ : str = new_children
else:
UpperCAmelCase__ : Optional[int] = new_children
else:
UpperCAmelCase__ : Union[str, Any] = new_children
def lowercase_ ( self : Union[str, Any] , _A : Node ):
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowercase_ ( self : int ):
'''simple docstring'''
return self.root is None
def lowercase_ ( self : List[str] , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = Node(_A ) # create a new Node
if self.empty(): # if Tree is empty
UpperCAmelCase__ : List[Any] = new_node # set its root
else: # Tree is not empty
UpperCAmelCase__ : str = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCAmelCase__ : Optional[Any] = new_node # We insert the new node in a leaf
break
else:
UpperCAmelCase__ : Any = parent_node.left
else:
if parent_node.right is None:
UpperCAmelCase__ : str = new_node
break
else:
UpperCAmelCase__ : List[str] = parent_node.right
UpperCAmelCase__ : Tuple = parent_node
def lowercase_ ( self : Optional[Any] , *_A : Tuple ):
'''simple docstring'''
for value in values:
self.__insert(_A )
def lowercase_ ( self : Union[str, Any] , _A : int ):
'''simple docstring'''
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
UpperCAmelCase__ : List[Any] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
UpperCAmelCase__ : str = node.left if value < node.value else node.right
return node
def lowercase_ ( self : List[Any] , _A : Node | None = None ):
'''simple docstring'''
if node is None:
if self.root is None:
return None
UpperCAmelCase__ : int = self.root
if not self.empty():
while node.right is not None:
UpperCAmelCase__ : Tuple = node.right
return node
def lowercase_ ( self : List[Any] , _A : Node | None = None ):
'''simple docstring'''
if node is None:
UpperCAmelCase__ : Optional[int] = self.root
if self.root is None:
return None
if not self.empty():
UpperCAmelCase__ : Optional[int] = self.root
while node.left is not None:
UpperCAmelCase__ : Tuple = node.left
return node
def lowercase_ ( self : List[Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.search(_A ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_A , _A )
elif node.left is None: # Has only right children
self.__reassign_nodes(_A , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_A , node.left )
else:
UpperCAmelCase__ : Union[str, Any] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCAmelCase__ : Optional[Any] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowercase_ ( self : List[str] , _A : Node | None ):
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowercase_ ( self : str , _A : Any=None ):
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowercase_ ( self : Dict , _A : list , _A : Node | None ):
'''simple docstring'''
if node:
self.inorder(_A , node.left )
arr.append(node.value )
self.inorder(_A , node.right )
def lowercase_ ( self : Optional[Any] , _A : int , _A : Node ):
'''simple docstring'''
UpperCAmelCase__ : list[int] = []
self.inorder(_A , _A ) # append all values to list using inorder traversal
return arr[k - 1]
def a__ ( lowerCAmelCase__ ) -> list[Node]:
UpperCAmelCase__ : Union[str, Any] = []
if curr_node is not None:
UpperCAmelCase__ : str = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def a__ ( ) -> None:
UpperCAmelCase__ : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
UpperCAmelCase__ : str = BinarySearchTree()
for i in testlist:
t.insert(lowerCAmelCase__ )
# Prints all the elements of the list in order traversal
print(lowerCAmelCase__ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(lowerCAmelCase__ )
print(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 299
| 1
|
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
lowerCamelCase : str = (boundary[1] - boundary[0]) / steps
lowerCamelCase : List[str] = boundary[0]
lowerCamelCase : Union[str, Any] = boundary[1]
lowerCamelCase : int = make_points(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[str] = 0.0
y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE )
for i in x_i:
# print(i)
y += h * f(_SCREAMING_SNAKE_CASE )
y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE )
return y
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
lowerCamelCase : int = a + h
while x < (b - h):
yield x
lowerCamelCase : List[str] = x + h
def A ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # enter your function here
lowerCamelCase : str = (x - 0) * (x - 0)
return y
def A ( ) -> int:
lowerCamelCase : int = 0.0 # Lower bound of integration
lowerCamelCase : int = 1.0 # Upper bound of integration
lowerCamelCase : Dict = 10.0 # define number of steps or resolution
lowerCamelCase : int = [a, b] # define boundary of integration
lowerCamelCase : str = method_a(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
print(f'''y = {y}''' )
if __name__ == "__main__":
main()
| 48
|
import argparse
import os
import re
SCREAMING_SNAKE_CASE__ : List[Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ) -> int:
with open(_SCREAMING_SNAKE_CASE ,"r" ,encoding="utf-8" ) as f:
lowerCamelCase : List[Any] = f.read()
lowerCamelCase : str = content.split("\n" )
lowerCamelCase : int = []
lowerCamelCase : List[Any] = 0
while line_idx < len(_SCREAMING_SNAKE_CASE ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowerCamelCase : Optional[int] = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowerCamelCase : Optional[int] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowerCamelCase : List[str] = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
lowerCamelCase : Union[str, Any] = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : _re_identifier.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(_SCREAMING_SNAKE_CASE ) )
elif "\n".join(_SCREAMING_SNAKE_CASE ) != content:
return True
def A ( _SCREAMING_SNAKE_CASE = False ) -> List[str]:
lowerCamelCase : str = [os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for f in os.listdir(_SCREAMING_SNAKE_CASE ) if f.endswith(".py" )]
lowerCamelCase : Union[str, Any] = [sort_auto_mapping(_SCREAMING_SNAKE_CASE ,overwrite=_SCREAMING_SNAKE_CASE ) for fname in fnames]
if not overwrite and any(_SCREAMING_SNAKE_CASE ):
lowerCamelCase : str = [f for f, d in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_SCREAMING_SNAKE_CASE )}. Run `make style` to fix'''
" this." )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 48
| 1
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
"""simple docstring"""
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
lowercase__ : str = cst_fwd.get(lowerCamelCase__ , np.inf )
lowercase__ : int = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
lowercase__ : Union[str, Any] = new_cost_f
lowercase__ : Tuple = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
lowercase__ : Optional[Any] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[int] = -1
lowercase__ : Dict = set()
lowercase__ : int = set()
lowercase__ : List[Any] = {source: 0}
lowercase__ : List[Any] = {destination: 0}
lowercase__ : Optional[int] = {source: None}
lowercase__ : Dict = {destination: None}
lowercase__ : List[Any] = PriorityQueue()
lowercase__ : str = PriorityQueue()
lowercase__ : Optional[int] = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
lowercase__ , lowercase__ : int = queue_forward.get()
visited_forward.add(lowerCamelCase__ )
lowercase__ , lowercase__ : Any = queue_backward.get()
visited_backward.add(lowerCamelCase__ )
lowercase__ : List[Any] = pass_and_relaxation(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , )
lowercase__ : Union[str, Any] = pass_and_relaxation(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
lowercase__ : Dict = shortest_distance
return shortest_path_distance
lowerCAmelCase__ = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
lowerCAmelCase__ = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """bridgetower_vision_model"""
def __init__( self : str , SCREAMING_SNAKE_CASE : Dict=768 , SCREAMING_SNAKE_CASE : Union[str, Any]=12 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Dict=16 , SCREAMING_SNAKE_CASE : List[Any]=288 , SCREAMING_SNAKE_CASE : List[Any]=1 , SCREAMING_SNAKE_CASE : List[str]=1E-0_5 , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : Tuple=False , **SCREAMING_SNAKE_CASE : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = hidden_size
lowercase__ : str = num_hidden_layers
lowercase__ : str = num_channels
lowercase__ : Optional[int] = patch_size
lowercase__ : Dict = image_size
lowercase__ : List[Any] = initializer_factor
lowercase__ : int = layer_norm_eps
lowercase__ : List[str] = stop_gradient
lowercase__ : Optional[int] = share_layernorm
lowercase__ : Optional[int] = remove_last_layer
@classmethod
def snake_case ( cls : Tuple , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE : str ):
lowercase__ , lowercase__ : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if config_dict.get("model_type" ) == "bridgetower":
lowercase__ : Optional[int] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """bridgetower_text_model"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any]=50_265 , SCREAMING_SNAKE_CASE : Optional[int]=768 , SCREAMING_SNAKE_CASE : List[str]=12 , SCREAMING_SNAKE_CASE : Union[str, Any]=12 , SCREAMING_SNAKE_CASE : List[Any]=1 , SCREAMING_SNAKE_CASE : Any=3_072 , SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : Tuple=514 , SCREAMING_SNAKE_CASE : List[str]=1 , SCREAMING_SNAKE_CASE : Dict=1E-0_5 , SCREAMING_SNAKE_CASE : List[str]=1 , SCREAMING_SNAKE_CASE : str=0 , SCREAMING_SNAKE_CASE : List[Any]=2 , SCREAMING_SNAKE_CASE : Optional[Any]="absolute" , SCREAMING_SNAKE_CASE : int=True , **SCREAMING_SNAKE_CASE : int , ):
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : Any = vocab_size
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Optional[int] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : int = hidden_act
lowercase__ : Optional[Any] = initializer_factor
lowercase__ : Dict = intermediate_size
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Optional[int] = max_position_embeddings
lowercase__ : str = type_vocab_size
lowercase__ : str = layer_norm_eps
lowercase__ : Dict = position_embedding_type
lowercase__ : Optional[Any] = use_cache
lowercase__ : List[str] = pad_token_id
lowercase__ : Optional[Any] = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
@classmethod
def snake_case ( cls : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE : str ):
lowercase__ , lowercase__ : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if config_dict.get("model_type" ) == "bridgetower":
lowercase__ : List[Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """bridgetower"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : Dict="gelu" , SCREAMING_SNAKE_CASE : Any=768 , SCREAMING_SNAKE_CASE : Dict=1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1E-0_5 , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : List[Any]="add" , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Any=6 , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , **SCREAMING_SNAKE_CASE : List[Any] , ):
# TODO: remove this once the Hub files are updated.
lowercase__ : int = kwargs.pop("text_config_dict" , SCREAMING_SNAKE_CASE )
lowercase__ : int = kwargs.pop("vision_config_dict" , SCREAMING_SNAKE_CASE )
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = share_cross_modal_transformer_layers
lowercase__ : int = hidden_act
lowercase__ : int = hidden_size
lowercase__ : Optional[int] = initializer_factor
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : str = share_link_tower_layers
lowercase__ : Optional[int] = link_tower_type
lowercase__ : List[Any] = num_attention_heads
lowercase__ : Dict = num_hidden_layers
lowercase__ : Dict = tie_word_embeddings
lowercase__ : Optional[Any] = init_layernorm_from_vision_encoder
if text_config is None:
lowercase__ : Optional[Any] = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
lowercase__ : List[Any] = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
lowercase__ : Any = BridgeTowerTextConfig(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = BridgeTowerVisionConfig(**SCREAMING_SNAKE_CASE )
@classmethod
def snake_case ( cls : Optional[int] , SCREAMING_SNAKE_CASE : BridgeTowerTextConfig , SCREAMING_SNAKE_CASE : BridgeTowerVisionConfig , **SCREAMING_SNAKE_CASE : List[str] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : List[str] = copy.deepcopy(self.__dict__ )
lowercase__ : Dict = self.text_config.to_dict()
lowercase__ : Tuple = self.vision_config.to_dict()
lowercase__ : Optional[Any] = self.__class__.model_type
return output
| 121
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.