| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
class Graph:
    def __init__(self):
        # map each vertex to its list of adjacent vertices
        self.vertex = {}

    def print_graph(self):
        """print each vertex followed by its adjacency list"""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """add a directed edge from from_vertex to to_vertex"""
        # check if the source vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """run a depth-first search over every component of the graph"""
        # visited array to track already-visited vertices
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        """recursive helper: visit start_vertex, then its unvisited neighbours"""
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 500
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """wrap a __getitem__ call as an (operation, *args) tuple"""
    return getitem, k


def _set(k, v):
    """wrap a __setitem__ call as an (operation, *args) tuple"""
    return setitem, k, v


def _del(k):
    """wrap a __delitem__ call as an (operation, *args) tuple"""
    return delitem, k


def _run_operation(obj, fun, *args):
    """run fun(obj, *args), returning (result, None) or (None, exception)"""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    """apply the same operations to HashMap and dict and compare observable state"""
    my = HashMap(initial_block_size=4)
    py = {}
    for fun, *args in operations:
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    """HashMap must not expose public names beyond those of dict"""

    def is_public(name) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
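

# A quick usage sketch (illustrative only, not part of the suite): the
# _set/_get/_del helpers build (operator, *args) tuples that _run_operation
# applies to any mapping, here a plain dict.
def _demo_operations():
    ops = [_set("k", 1), _get("k"), _del("k"), _get("k")]
    d = {}
    results = [_run_operation(d, fun, *args) for fun, *args in ops]
    assert results[1] == (1, None)  # the stored value round-trips
    assert isinstance(results[3][1], KeyError)  # reading a deleted key fails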
| 315
| 0
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for SHA-1 hashing a bytestring"""

    def __init__(self, data):
        self.data = data
        # initial hash values, as specified by the SHA-1 standard
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # left-rotate the 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # pad to a multiple of 64 bytes, appending the message length in bits
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        # split the padded data into 64-byte blocks
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # expand a 64-byte block into eighty 32-bit words
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
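
# A quick sanity sketch (mirrors test_sha1_hash above): the custom digest
# should agree with hashlib's SHA-1 for any bytestring, e.g.
#
#   SHA1Hash(b"hello").final_hash() == hashlib.sha1(b"hello").hexdigest()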
| 33
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 33
| 1
|
"""simple docstring"""
import os
def lowerCAmelCase_ ( ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Dict = os.path.dirname(os.path.realpath(lowercase_ ) )
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(lowercase_ , '''triangle.txt''' )
with open(lowercase_ ) as f:
__SCREAMING_SNAKE_CASE : Dict = f.readlines()
__SCREAMING_SNAKE_CASE : Optional[int] = []
for line in triangle:
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
for number in line.strip().split(''' ''' ):
numbers_from_line.append(int(lowercase_ ) )
a.append(lowercase_ )
for i in range(1 , len(lowercase_ ) ):
for j in range(len(a[i] ) ):
__SCREAMING_SNAKE_CASE : Optional[int] = a[i - 1][j] if j != len(a[i - 1] ) else 0
__SCREAMING_SNAKE_CASE : Optional[Any] = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(lowercase_ , lowercase_ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
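
# A small worked example (the 4-row sample triangle from the problem statement):
# after the in-place pass, each cell holds the best path sum reaching it, and
# the answer is the maximum of the last row.
#
#      3                 3
#     7 4             10  7
#    2 4 6          12 14 13
#   8 5 9 3       20 19 23 16   -> max = 23  (path 3 + 7 + 4 + 9)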
| 674
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def lowerCAmelCase_ ( lowercase_ : List[str] , lowercase_ : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[int] = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = DatasetInfosDict.from_directory(lowercase_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def lowerCAmelCase_ ( lowercase_ : Any , lowercase_ : DatasetInfo ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[str] = str(lowercase_ )
dataset_info.write_to_directory(lowercase_ )
__SCREAMING_SNAKE_CASE : Dict = DatasetInfo.from_directory(lowercase_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowercase_ , '''dataset_info.json''' ) )
def lowerCAmelCase_ ( ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Tuple = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
__SCREAMING_SNAKE_CASE : Optional[int] = dataset_info._to_yaml_dict()
assert sorted(lowercase_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
__SCREAMING_SNAKE_CASE : int = yaml.safe_dump(lowercase_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = yaml.safe_load(lowercase_ )
assert dataset_info_yaml_dict == reloaded
def lowerCAmelCase_ ( ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetInfo()
__SCREAMING_SNAKE_CASE : Union[str, Any] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def lowerCAmelCase_ ( lowercase_ : List[Any] , lowercase_ : DatasetInfosDict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[int] = str(lowercase_ )
dataset_infos_dict.write_to_directory(lowercase_ )
__SCREAMING_SNAKE_CASE : Optional[Any] = DatasetInfosDict.from_directory(lowercase_ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
__SCREAMING_SNAKE_CASE : Optional[int] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
__SCREAMING_SNAKE_CASE : Tuple = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowercase_ , '''README.md''' ) )
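

# A minimal sketch of the round-trip these tests exercise (values assumed):
# dumping the YAML dict and loading it back should be lossless.
def _demo_yaml_roundtrip():
    info = DatasetInfo(dataset_size=42)
    yaml_str = yaml.safe_dump(info._to_yaml_dict())
    assert yaml.safe_load(yaml_str) == info._to_yaml_dict()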
| 674
| 1
|
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
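
# A quick illustration (standalone sketch): the regex yields (name, link)
# pairs, and a checkpoint is accepted only when the link is exactly
# https://huggingface.co/<name>.
def _demo_checkpoint_regex():
    doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
    assert _re_checkpoint.findall(doc) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]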
| 719
|
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
A : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a=7_68 ):
super().__init__(__a )
__lowerCAmelCase = proj_size
__lowerCAmelCase = CLIPVisionModel(__a )
__lowerCAmelCase = PaintByExampleMapper(__a )
__lowerCAmelCase = nn.LayerNorm(config.hidden_size )
__lowerCAmelCase = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
__lowerCAmelCase = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def snake_case ( self , __a , __a=False ):
__lowerCAmelCase = self.model(pixel_values=__a )
__lowerCAmelCase = clip_output.pooler_output
__lowerCAmelCase = self.mapper(latent_states[:, None] )
__lowerCAmelCase = self.final_layer_norm(__a )
__lowerCAmelCase = self.proj_out(__a )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__()
__lowerCAmelCase = (config.num_hidden_layers + 1) // 5
__lowerCAmelCase = config.hidden_size
__lowerCAmelCase = 1
__lowerCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(__a , __a , __a , activation_fn="gelu" , attention_bias=__a )
for _ in range(__a )
] )
def snake_case ( self , __a ):
for block in self.blocks:
__lowerCAmelCase = block(__a )
return hidden_states
| 282
| 0
|
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Two Transformer2DModel blocks whose residual outputs are mixed at inference time."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim,
                    in_channels=in_channels, num_layers=num_layers, dropout=dropout,
                    norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias, sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds, activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None,
                cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
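

# Note on the blend (illustrative): with mix_ratio = 0.5 the forward pass
# averages the two transformers' residuals before re-adding the input, i.e.
#
#   output = mix_ratio * (tA(x) - x) + (1 - mix_ratio) * (tB(x) - x) + x
#
# where tA/tB are the transformers selected by transformer_index_for_condition
# for the first and second condition segment respectively.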
| 507
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
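
# How the lazy pattern above behaves (sketch): importing this package does not
# pull in torch or TF; _LazyModule resolves names from _import_structure on
# first attribute access, e.g.
#
#   from transformers.models.mobilebert import MobileBertConfig  # real import happens lazily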
| 264
| 0
|
"""Return one longest non-decreasing subsequence of a list, recursively."""
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
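
# Hand-traced usage examples (illustrative): the function returns one longest
# non-decreasing subsequence, not necessarily the unique one.
#
#   longest_subsequence([1, 1, 1])   -> [1, 1, 1]
#   longest_subsequence([3, 2, 4])   -> [2, 4]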
| 705
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding step, holding the encoded latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
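
# A minimal usage sketch (shapes and defaults assumed, not from the source):
#
#   model = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   latents = model.encode(x).latents      # quant_conv output, pre-quantization
#   recon = model.decode(latents).sample   # quantize -> post_quant_conv -> decoder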
| 511
| 0
|
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 49
|
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """
    Generate a random graph with `vertices_number` vertices, adding each possible
    edge with the given probability. Returns an adjacency-list dict.
    """
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, too
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """
    Generate a complete graph with vertices_number vertices.
    """
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
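
# A usage sketch (deterministic cases only): probability >= 1 short-circuits to
# complete_graph, probability <= 0 yields an edgeless graph.
#
#   random_graph(3, 1)   -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}
#   random_graph(3, 0)   -> {0: [], 1: [], 2: []}
#   complete_graph(2)    -> {0: [1], 1: [0]}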
| 271
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
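
# Typical invocations (sketch): this module backs `accelerate test`, which
# launches test_script.py through accelerate-launch, optionally against a
# saved config file.
#
#   accelerate test
#   accelerate test --config_file path/to/config.yaml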
| 92
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 92
| 1
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files


FILE_CONTENT = """\
    Text data.
    Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> Any:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> List[str]:
SCREAMING_SNAKE_CASE_ = datasets.Dataset.from_dict(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(__UpperCAmelCase ) ) as con:
SCREAMING_SNAKE_CASE_ = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Any ) -> List[str]:
import bza
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__UpperCAmelCase , 'rb' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.parquet')
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        })
    with open(path, 'wb') as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
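# Sanity check (sketch): the file written above can be read back with
#   pq.read_table(path).to_pydict()
# which should reproduce the DATA columns.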
@pytest.fixture(scope='session' )
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope='session')
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope='session')
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset_312.jsonl')
    with open(path, 'w') as f:
        for item in DATA_312:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset-str.jsonl')
    with open(path, 'w') as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session' )
def text_gz_path(tmp_path_factory, text_path):
    import gzip
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt.gz')
    with open(text_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope='session')
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl.gz')
    with open(jsonl_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope='session')
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope='session')
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(zip_jsonl_path, arcname=os.path.join('nested', os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope='session')
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.join('main_dir', os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join('main_dir', os.path.basename(jsonl2_path)))
    return path
@pytest.fixture(scope='session')
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope='session')
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(tar_jsonl_path, arcname=os.path.join('nested', os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope='session' )
def text_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def text2_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def text_abc_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data') / 'dataset.abc'
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path
@pytest.fixture(scope='session')
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.join('main_dir', os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join('main_dir', os.path.basename(text2_path)))
    return path
@pytest.fixture(scope='session')
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.ext.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename('unsupported.ext'))
        f.write(text2_path, arcname=os.path.basename('unsupported_2.ext'))
    return path
@pytest.fixture(scope='session')
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'])
    path = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(text)
    return path
@pytest.fixture(scope='session' )
def image_file():
    return os.path.join('tests', 'features', 'data', 'test_image_rgb.jpg')
@pytest.fixture(scope='session' )
def audio_file():
    return os.path.join('tests', 'features', 'data', 'test_audio_44100.wav')
@pytest.fixture(scope='session' )
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.img.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace('.jpg', '2.jpg'))
    return path
@pytest.fixture(scope='session' )
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp('data_dir')
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from manim import *
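# Manim scene visualizing big-model inference: an input square moves through the
# model's layers while the corresponding layer weights are shuttled between disk,
# CPU and GPU, as described by the on-screen MarkupText captions below.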
class UpperCAmelCase__ ( Scene ):
    """simple docstring"""
    def construct( self ):
'''simple docstring'''
_a : Tuple = Rectangle(height=0.5 ,width=0.5 )
_a : List[str] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_a : Union[str, Any] = Rectangle(height=0.25 ,width=0.25 )
_a : str = [mem.copy() for i in range(6 )]
_a : Tuple = [mem.copy() for i in range(6 )]
_a : Dict = VGroup(*_a ).arrange(_a ,buff=0 )
_a : List[str] = VGroup(*_a ).arrange(_a ,buff=0 )
_a : Dict = VGroup(_a ,_a ).arrange(_a ,buff=0 )
_a : int = Text('CPU' ,font_size=24 )
_a : List[str] = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_a : int = [mem.copy() for i in range(4 )]
_a : List[Any] = VGroup(*_a ).arrange(_a ,buff=0 )
_a : Dict = Text('GPU' ,font_size=24 )
_a : int = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_a : Union[str, Any] = [mem.copy() for i in range(6 )]
_a : Any = VGroup(*_a ).arrange(_a ,buff=0 )
_a : Any = Text('Model' ,font_size=24 )
_a : List[Any] = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_a : List[Any] = []
_a : Optional[Any] = []
for i, rect in enumerate(_a ):
_a : List[Any] = fill.copy().set_fill(_a ,opacity=0.8 )
target.move_to(_a )
model_arr.append(_a )
_a : Optional[Any] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_a ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_a )
self.add(*_a ,*_a )
_a : int = [meta_mem.copy() for i in range(6 )]
_a : List[Any] = [meta_mem.copy() for i in range(6 )]
_a : List[str] = VGroup(*_a ).arrange(_a ,buff=0 )
_a : str = VGroup(*_a ).arrange(_a ,buff=0 )
_a : Any = VGroup(_a ,_a ).arrange(_a ,buff=0 )
_a : Optional[Any] = Text('Disk' ,font_size=24 )
_a : Optional[int] = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
disk.move_to([-4, -1.25, 0] )
self.add(_a ,_a )
_a : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a : Union[str, Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(_a ,_a )
_a : int = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(_a ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(_a )
_a : Tuple = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(_a ) )
_a : str = Square(0.3 )
input.set_fill(_a ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,_a ,buff=0.5 )
self.play(Write(_a ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=_a ,buff=0.02 )
self.play(MoveToTarget(_a ) )
self.play(FadeOut(_a ) )
_a : Optional[Any] = Arrow(start=_a ,end=_a ,color=_a ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,_a ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_a : Union[str, Any] = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(_a ,run_time=3 ) )
_a : Any = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_a ) ,Circumscribe(model_arr[0] ,color=_a ,**_a ) ,Circumscribe(model_cpu_arr[0] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_a : Tuple = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,_a ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_a : List[str] = AnimationGroup(
FadeOut(_a ,run_time=0.5 ) ,MoveToTarget(_a ,run_time=0.5 ) ,FadeIn(_a ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(_a )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_a : Tuple = 0.7
self.play(
Circumscribe(model_arr[i] ,**_a ) ,Circumscribe(cpu_left_col_base[i] ,**_a ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,Circumscribe(model_arr[i + 1] ,color=_a ,**_a ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=_a ,**_a ) ,Circumscribe(cpu_left_col_base[-1] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_a : Dict = a_c
_a : Dict = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(_a ) ,FadeOut(_a ,run_time=0.5 ) ,)
_a : Tuple = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a ,run_time=3 ) ,MoveToTarget(_a ) )
self.wait()
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
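# Tests for MgpstrProcessor: save/load round-trips plus delegation to the
# character tokenizer and the ViT image processor.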
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = ViTImageProcessor if is_vision_available() else None
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Prepares a single random RGB PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def __lowercase ( self : Any ):
'''simple docstring'''
_a : str = self.get_tokenizer()
_a : Optional[int] = self.get_image_processor()
_a : int = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : List[Any] = MgpstrProcessor.from_pretrained(self.tmpdirname ,use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer ,_a )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor ,_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Union[str, Any] = self.get_tokenizer()
_a : Any = self.get_image_processor()
_a : List[str] = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : int = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
_a : Dict = self.get_image_processor(do_normalize=_a ,padding_value=1.0 )
_a : Optional[int] = MgpstrProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=_a ,padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer ,_a )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Dict = self.get_image_processor()
_a : int = self.get_tokenizer()
_a : int = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : Optional[int] = self.prepare_image_inputs()
_a : Optional[int] = image_processor(_a ,return_tensors='np' )
_a : str = processor(images=_a ,return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Optional[Any] = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : Dict = 'test'
_a : Optional[Any] = processor(text=_a )
_a : Any = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.get_image_processor()
_a : Any = self.get_tokenizer()
_a : List[str] = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : Optional[int] = 'test'
_a : Optional[Any] = self.prepare_image_inputs()
_a : Tuple = processor(text=_a ,images=_a )
self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_a : Any = processor.char_decode(_a )
_a : int = tokenizer.batch_decode(_a )
_a : List[Any] = [seq.replace(' ' ,'' ) for seq in decoded_tok]
self.assertListEqual(_a ,_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : int = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : List[str] = None
_a : int = self.prepare_image_inputs()
_a : List[str] = processor(text=_a ,images=_a )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Optional[Any] = MgpstrProcessor(tokenizer=_a ,image_processor=_a )
_a : Tuple = torch.randn(1 ,27 ,38 )
_a : Optional[int] = torch.randn(1 ,27 ,5_0257 )
_a : List[str] = torch.randn(1 ,27 ,3_0522 )
_a : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) ,['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
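# Builds a tiny BERT-backed DPRConfig plus random inputs for exercising the three
# TF DPR heads (context encoder, question encoder, reader).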
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)
    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)
    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        input_ids = tf.constant(
            [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowerCAmelCase__ = NewType('''DataClass''', Any)
lowerCAmelCase__ = NewType('''DataClassType''', Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."""
        )
def make_choice_type_function(choices: list):
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
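# e.g. string_to_bool("Yes") -> True, string_to_bool("0") -> False, and a value
# that is already a bool is passed through unchanged.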
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]
    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser, field):
        field_name = f"""--{field.name}"""
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default")
        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]
        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f""" Problem encountered in field '{field.name}'.""")
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]
            kwargs["type"] = make_choice_type_function(kwargs["choices"])
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"""--no_{field.name}""", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)")
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`.") from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None,
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""")
            return (*outputs,)
    def parse_dict(self, args: dict, allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}""")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
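# Minimal usage sketch (the `Args` dataclass is hypothetical, for illustration only):
#
#   @dataclasses.dataclass
#   class Args:
#       learning_rate: float = HfArg(default=3e-5, help="Peak learning rate")
#       do_train: bool = False
#
#   parser = HfArgumentParser(Args)
#   (args,) = parser.parse_args_into_dataclasses()
#
# Fields that default to True also get a paired `--no_<name>` flag, and configs can
# be loaded from files via parse_json_file / parse_yaml_file / parse_dict.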
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__a : Dict = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=F"{key} not identical")
        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=F"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=F"{key} not identical")
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=F"{key} not identical")
def check_models_equal(model_1, model_2):
    """Compare two Flax models parameter by parameter."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""")
        model = FlaxBertModel(config)
        subfolder = """bert"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""")
        model = FlaxBertModel(config)
        subfolder = """bert"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="""10KB""")
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = """bert"""
        model_id = """hf-internal-testing/tiny-random-bert-subfolder"""
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = """bert"""
        model_id = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial (coefficients in ascending order of degree) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
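# Worked example: for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0 both functions
# return 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.0; Horner's rule does it with one
# multiply-add per coefficient instead of computing each power of x separately.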
if __name__ == "__main__":
__a : Dict = (0.0, 0.0, 5.0, 9.3, 7.0)
__a : Optional[Any] = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ):
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
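# Usage sketch (the checkpoint name is illustrative, not pinned by this file):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is shown?", return_tensors="pt")
# The returned BatchFeature holds pixel_values, input_ids/attention_mask from the
# main tokenizer, and qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer.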
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy
    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
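# Greedy best-first search orders the frontier by the heuristic alone (f = h),
# unlike A* which uses f = g + h; it is fast but does not guarantee shortest paths.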
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ))
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
| 16
| 1
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( ):
"""simple docstring"""
snake_case__ : int = 10
snake_case__ : List[str] = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
snake_case__ : Optional[Any] = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(lowerCamelCase_ ) ),
} , features=lowerCamelCase_ , )
return dataset
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowerCamelCase_ )
return filename
# FILE_CONTENT + files
lowerCAmelCase__ = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
snake_case__ : Dict = FILE_CONTENT
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return filename
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
    import bz2
    snake_case__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
    snake_case__ : Dict = bytes(lowerCamelCase_ , """utf-8""" )
    with bz2.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
import gzip
snake_case__ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
snake_case__ : List[Any] = bytes(lowerCamelCase_ , """utf-8""" )
with gzip.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
    snake_case__ : int = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
    snake_case__ : Optional[Any] = bytes(lowerCamelCase_ , """utf-8""" )
    with lz4.frame.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
    snake_case__ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
    with py7zr.SevenZipFile(lowerCamelCase_ , """w""" ) as archive:
archive.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
import tarfile
snake_case__ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowerCamelCase_ , """w""" ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
import lzma
snake_case__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
snake_case__ : List[Any] = bytes(lowerCamelCase_ , """utf-8""" )
with lzma.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
import zipfile
snake_case__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case__ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
snake_case__ : List[str] = bytes(lowerCamelCase_ , """utf-8""" )
with zstd.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
snake_case__ : str = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return filename
lowerCAmelCase__ = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCAmelCase__ = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCAmelCase__ = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase__ = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCAmelCase__ = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( ):
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : str = datasets.Dataset.from_dict(lowerCamelCase_ )
snake_case__ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(lowerCamelCase_ ) ) as con:
snake_case__ : str = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowerCamelCase_ , """w""" , newline="""""" ) as f:
snake_case__ : Any = csv.DictWriter(lowerCamelCase_ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowerCamelCase_ , """w""" , newline="""""" ) as f:
snake_case__ : List[Any] = csv.DictWriter(lowerCamelCase_ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
    import bz2
    snake_case__ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
    with open(lowerCamelCase_ , """rb""" ) as f:
        snake_case__ : List[Any] = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
snake_case__ : str = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowerCamelCase_ , """wb""" ) as f:
snake_case__ : Tuple = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_ )
snake_case__ : Tuple = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase_ ) )] for k in DATA[0]} , schema=lowerCamelCase_ )
writer.write_table(lowerCamelCase_ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case__ : Any = {"""data""": DATA}
with open(lowerCamelCase_ , """w""" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case__ : str = {"""data""": DATA_DICT_OF_LISTS}
with open(lowerCamelCase_ , """w""" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
import gzip
snake_case__ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowerCamelCase_ , """rb""" ) as orig_file:
with gzip.open(lowerCamelCase_ , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
import gzip
snake_case__ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowerCamelCase_ , """rb""" ) as orig_file:
with gzip.open(lowerCamelCase_ , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase_ , """w""" ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase_ , """w""" ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Dict = ["""0""", """1""", """2""", """3"""]
snake_case__ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Dict = ["""0""", """1""", """2""", """3"""]
snake_case__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : List[Any] = ["""0""", """1""", """2""", """3"""]
snake_case__ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowerCamelCase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowerCamelCase_ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Dict = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( ):
"""simple docstring"""
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( ):
"""simple docstring"""
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Optional[Any] = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
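# Usage sketch (hypothetical test, not part of this conftest): pytest injects
# session-scoped fixtures by name; the fixture name `text_file` below is an
# illustrative assumption, since the original fixture names were mangled.
#   def test_text_file(text_file):
#       assert os.path.getsize(text_file) > 0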
| 712
|
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowerCAmelCase__ = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , lowerCamelCase : Path , lowerCamelCase : Union[str, None] = None , lowerCamelCase : Union[List[str], None] = None , lowerCamelCase : Union[str, List[str], None] = None , lowerCamelCase : bool = True , )-> Dict:
snake_case__ : int = [file for file in os.listdir(lowerCamelCase ) if os.path.isfile(os.path.join(lowerCamelCase , lowerCamelCase ) )]
if identifier is not None:
snake_case__ : List[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowerCamelCase , lowerCamelCase ):
for n_ in n_identifier:
snake_case__ : Union[str, Any] = [file for file in files if n_ not in file]
else:
snake_case__ : Optional[Any] = [file for file in files if n_identifier not in file]
snake_case__ : Tuple = ignore_files or []
ignore_files.append("""__init__.py""" )
snake_case__ : int = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , lowerCamelCase )
if only_modules:
snake_case__ : Union[str, Any] = file.split(""".""" )[0]
try:
snake_case__ : Any = getattr(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = doctest.DocTestSuite(lowerCamelCase )
snake_case__ : int = unittest.TextTestRunner().run(lowerCamelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
snake_case__ : List[Any] = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def __lowerCAmelCase ( self : Tuple )-> List[str]:
snake_case__ : Optional[int] = Path("""src/transformers""" )
snake_case__ : Optional[Any] = """modeling"""
snake_case__ : Optional[Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(lowerCamelCase , identifier=lowerCamelCase , ignore_files=lowerCamelCase )
def __lowerCAmelCase ( self : List[str] )-> Union[str, Any]:
snake_case__ : Optional[Any] = Path("""src/transformers""" )
snake_case__ : Any = """tokenization"""
self.analyze_directory(lowerCamelCase , identifier=lowerCamelCase )
def __lowerCAmelCase ( self : Dict )-> Dict:
snake_case__ : Any = Path("""src/transformers""" )
snake_case__ : List[Any] = """configuration"""
self.analyze_directory(lowerCamelCase , identifier=lowerCamelCase )
def __lowerCAmelCase ( self : Dict )-> Tuple:
snake_case__ : int = Path("""src/transformers""" )
snake_case__ : int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(lowerCamelCase , n_identifier=lowerCamelCase )
def __lowerCAmelCase ( self : Union[str, Any] )-> Tuple:
snake_case__ : List[Any] = Path("""docs/source""" )
snake_case__ : Optional[int] = ["""favicon.ico"""]
self.analyze_directory(lowerCamelCase , ignore_files=lowerCamelCase , only_modules=lowerCamelCase )
| 172
| 0
|
"""simple docstring"""
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized) knapsack; `f` is a global dp table."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    """Solves the knapsack and also reconstructs one optimal subset of items."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F'''But got {num_items} weights and {len(val)} values'''
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                F'''type {type(wt[i])} at index {i}'''
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 589
|
'''simple docstring'''
def UpperCAmelCase ( number : int):
    if number < 0:
        raise ValueError('number must not be negative')
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 320
| 0
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Any, SCREAMING_SNAKE_CASE__: Union[str, Any], SCREAMING_SNAKE_CASE__: Tuple ) -> Optional[Any]:
"""simple docstring"""
__a = 1.5
__a = int(factor * num_class_images )
__a = ClipClient(
url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=SCREAMING_SNAKE_CASE__, aesthetic_weight=0.1 )
os.makedirs(f"""{class_data_dir}/images""", exist_ok=SCREAMING_SNAKE_CASE__ )
if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
__a = client.query(text=SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
__a = int(factor * num_images )
__a = ClipClient(
url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=SCREAMING_SNAKE_CASE__, aesthetic_weight=0.1, )
__a = 0
__a = 0
__a = tqdm(desc='downloading real regularization images', total=SCREAMING_SNAKE_CASE__ )
with open(f"""{class_data_dir}/caption.txt""", 'w' ) as fa, open(f"""{class_data_dir}/urls.txt""", 'w' ) as fa, open(
f"""{class_data_dir}/images.txt""", 'w' ) as fa:
while total < num_class_images:
__a = class_images[count]
count += 1
try:
__a = requests.get(images['url'] )
if img.status_code == 200:
__a = Image.open(BytesIO(img.content ) )
with open(f"""{class_data_dir}/images/{total}.jpg""", 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(f"""{class_data_dir}/images/{total}.jpg""" + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
__a = argparse.ArgumentParser('', add_help=SCREAMING_SNAKE_CASE__ )
parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=SCREAMING_SNAKE_CASE__, type=SCREAMING_SNAKE_CASE__ )
parser.add_argument('--class_data_dir', help='path to save images', required=SCREAMING_SNAKE_CASE__, type=SCREAMING_SNAKE_CASE__ )
parser.add_argument('--num_class_images', help='number of images to download', default=200, type=SCREAMING_SNAKE_CASE__ )
return parser.parse_args()
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
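# Example invocation (the script filename is illustrative, not from the source):
#   python retrieve_class_images.py --class_prompt "photo of a dog" \
#       --class_data_dir ./class_data --num_class_images 200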
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Optional[int] = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 270
| 0
|
'''simple docstring'''
def infix_2_postfix(infix):
    '''simple docstring'''
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8), 'Stack'.center(print_width), 'Postfix'.center(print_width), sep=' | ')
    print('-' * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ')  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ')  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    '''simple docstring'''
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ')'  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '('  # change ")" to "("
    return (infix_2_postfix(''.join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
    print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 531
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case:
def __init__(self : Any , a : str , a : Union[str, Any]=12 , a : List[str]=7 , a : Dict=True , a : Tuple=True , a : Any=True , a : Optional[Any]=99 , a : Optional[Any]=32 , a : Tuple=32 , a : List[Any]=2 , a : str=4 , a : Dict=37 , a : Optional[Any]=0.1 , a : List[Any]=0.1 , a : Dict=5_12 , a : List[Any]=0.02 , a : Any=0 , a : Optional[int]=None , ) -> List[Any]:
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = dropout
A__ = attention_dropout
A__ = max_position_embeddings
A__ = initializer_range
A__ = scope
A__ = bos_token_id
def _UpperCamelCase (self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
A__ = input_mask.numpy()
A__ , A__ = input_mask.shape
A__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(a ):
A__ = 1
A__ = 0
A__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(a )
def _UpperCamelCase (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase (self : Optional[int] , a : Any , a : List[Any] , a : Union[str, Any] ) -> Any:
"""simple docstring"""
A__ = TFBlipTextModel(config=a )
A__ = model(a , attention_mask=a , training=a )
A__ = model(a , training=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase (self : str ) -> List[Any]:
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _snake_case( UpperCAmelCase , unittest.TestCase ):
__snake_case: Optional[int] = (TFBlipTextModel,) if is_tf_available() else ()
__snake_case: Union[str, Any] = False
__snake_case: Any = False
__snake_case: Union[str, Any] = False
def _UpperCamelCase (self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
A__ = BlipTextModelTester(self )
A__ = ConfigTester(self , config_class=a , hidden_size=37 )
def _UpperCamelCase (self : Tuple ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCamelCase (self : Any ) -> Dict:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCamelCase (self : Dict ) -> Dict:
"""simple docstring"""
pass
def _UpperCamelCase (self : str ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase (self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase (self : int ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase (self : int ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def _UpperCamelCase (self : int ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFBlipTextModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCamelCase (self : str , a : Optional[int]=True ) -> List[Any]:
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=a )
| 531
| 1
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = """"""
UpperCamelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
UpperCamelCase = None # compression type in fsspec. ex: "gzip"
    UpperCamelCase = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : Optional[int] , a_ : str = "" , a_ : Optional[str] = None , a_ : Optional[dict] = None , **a_ : List[Any] ):
'''simple docstring'''
super().__init__(self , **a_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__UpperCAmelCase : Optional[int] = fsspec.open(
a_ , mode='''rb''' , protocol=a_ , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__UpperCAmelCase : Optional[Any] = os.path.basename(self.file.path.split('''::''' )[0] )
__UpperCAmelCase : Union[str, Any] = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
__UpperCAmelCase : Any = None
@classmethod
def snake_case__ ( cls : List[Any] , a_ : List[str] ):
'''simple docstring'''
return super()._strip_protocol(a_ ).lstrip('''/''' )
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
if self.dir_cache is None:
__UpperCAmelCase : int = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
__UpperCAmelCase : int = {f['''name''']: f}
def snake_case__ ( self : str , a_ : str ):
'''simple docstring'''
return self.file.open().read()
def snake_case__ ( self : str , a_ : str , a_ : str = "rb" , a_ : str=None , a_ : int=True , a_ : List[str]=None , **a_ : int , ):
'''simple docstring'''
__UpperCAmelCase : int = self._strip_protocol(a_ )
if mode != "rb":
raise ValueError(F'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = """bz2"""
UpperCamelCase = """bz2"""
UpperCamelCase = """.bz2"""
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = """gzip"""
UpperCamelCase = """gzip"""
UpperCamelCase = """.gz"""
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = """lz4"""
UpperCamelCase = """lz4"""
UpperCamelCase = """.lz4"""
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = """xz"""
UpperCamelCase = """xz"""
UpperCamelCase = """.xz"""
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = """zstd"""
UpperCamelCase = """zstd"""
UpperCamelCase = """.zst"""
def __init__( self : Tuple , a_ : str , a_ : str = "rb" , a_ : Optional[str] = None , a_ : Optional[dict] = None , a_ : int = DEFAULT_BLOCK_SIZE , **a_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
fo=a_ , mode=a_ , target_protocol=a_ , target_options=a_ , block_size=a_ , **a_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__UpperCAmelCase : List[str] = self.file.__enter__
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[int] , a_ : int ):
'''simple docstring'''
__UpperCAmelCase : List[str] = file_
def __enter__( self : int ):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__( self : Optional[int] , *a_ : Union[str, Any] , **a_ : List[str] ):
'''simple docstring'''
self._file.__exit__(*a_ , **a_ )
def __iter__( self : List[str] ):
'''simple docstring'''
return iter(self._file )
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
return next(self._file )
def __getattr__( self : Union[str, Any] , a_ : Any ):
'''simple docstring'''
return getattr(self._file , a_ )
def fixed_enter(*a_ : Union[str, Any] , **a_ : Tuple ):
return WrappedFile(_enter(*a_ , **a_ ) )
__UpperCAmelCase : Any = fixed_enter
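# Usage sketch (illustrative): these filesystems back chained fsspec URLs of the
# form documented above, e.g.
#   with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz") as f:
#       text = f.read()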
| 704
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Tuple = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
__UpperCAmelCase : Dict = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !
__UpperCAmelCase : int = model(a_ )['''last_hidden_state''']
__UpperCAmelCase : Optional[int] = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , a_ )
# compare the actual values for a slice.
__UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor(
            [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 241
| 0
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def lowerCAmelCase_ ( repo_id: str , path: str , revision: Optional[str] = None ) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type="dataset" , revision=revision )
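# Illustrative result (assuming a dataset repo and the default revision):
#   lowerCAmelCase_("user/repo", "data/train.json")
#   -> "https://huggingface.co/datasets/user/repo/resolve/main/data/train.json"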
| 59
|
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str ):
def get_masked_lm_array(UpperCamelCase : str ):
A__ = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
def get_encoder_array(UpperCamelCase : str ):
A__ = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
def get_encoder_layer_array(UpperCamelCase : int , UpperCamelCase : str ):
A__ = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
def get_encoder_attention_layer_array(UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : int ):
A__ = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
A__ = array.reshape(UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
print(F"""Loading model based on config from {config_path}...""" )
A__ = BertConfig.from_json_file(UpperCamelCase )
A__ = BertForMaskedLM(UpperCamelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
A__ = model.bert.encoder.layer[layer_index]
# Self-attention
A__ = layer.attention.self
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_query_dense/bias""" , self_attn.query.bias.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_key_dense/bias""" , self_attn.key.bias.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
A__ = layer.attention.output
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_output_dense/bias""" , self_output.dense.bias.data.shape )
A__ = get_encoder_layer_array(UpperCamelCase , """_attention_layer_norm/gamma""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_attention_layer_norm/beta""" )
# Intermediate
A__ = layer.intermediate
A__ = get_encoder_layer_array(UpperCamelCase , """_intermediate_dense/kernel""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_intermediate_dense/bias""" )
# Output
A__ = layer.output
A__ = get_encoder_layer_array(UpperCamelCase , """_output_dense/kernel""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_output_dense/bias""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_output_layer_norm/gamma""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_output_layer_norm/beta""" )
# Embeddings
A__ = get_encoder_array("""_position_embedding_layer/embeddings""" )
A__ = get_encoder_array("""_type_embedding_layer/embeddings""" )
A__ = get_encoder_array("""_embedding_norm_layer/gamma""" )
A__ = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
A__ = model.cls.predictions.transform
A__ = get_masked_lm_array("""dense/kernel""" )
A__ = get_masked_lm_array("""dense/bias""" )
A__ = get_masked_lm_array("""layer_norm/gamma""" )
A__ = get_masked_lm_array("""layer_norm/beta""" )
A__ = get_masked_lm_array("""embedding_table""" )
# Pooling
A__ = BertPooler(config=UpperCamelCase )
A__ = get_encoder_array("""_pooler_layer/kernel""" )
A__ = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(UpperCamelCase )
# Integration test - should load without any errors ;)
A__ = BertForMaskedLM.from_pretrained(UpperCamelCase )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
lowerCamelCase__ = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
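# Example invocation (script name and paths are illustrative):
#   python convert_token_dropping_checkpoint.py --tf_checkpoint_path ./tf_ckpt \
#       --bert_config_file ./bert_config.json --pytorch_dump_path ./pytorch_model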
| 574
| 0
|
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
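# e.g. solution(3) == 644 — (644, 645, 646) is the first run of three
# consecutive integers that each have three distinct prime factors (Project Euler 47).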
if __name__ == "__main__":
print(solution())
| 700
| 0
|
'''simple docstring'''
def __snake_case ( a : int , b : int ):
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
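# Example: 25 is 0b011001 and 32 is 0b100000 once zero-padded to equal length,
# so their bitwise XOR is "0b111001".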
if __name__ == "__main__":
import doctest
doctest.testmod()
| 396
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 15
| 0
|
"""simple docstring"""
MOD_ADLER = 65_521


def A_ ( plain_text : str ):
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 721
|
"""simple docstring"""
def A_ ( numerator : int = 1 , digit : int = 10_00 ):
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
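# With the default digit=1000 this returns 983: 1/983 has the longest recurring
# decimal cycle among unit fractions 1/d with d below 1000 (Project Euler 26).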
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 396
| 0
|
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    '''Extract job names and their links in a GitHub Actions workflow run.'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    '''Get all artifact links from a workflow run.'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    '''Download a GitHub Actions artifact from a URL.'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    '''Extract errors from a downloaded artifact (in .zip format).'''
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    '''Extract errors from all artifact files.'''
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    '''Count the occurrences of each error.'''
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    '''Get the model name from a test method.'''
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None

    return model


def reduce_by_model(logs, error_filter=None):
    '''Count the occurrences of each error per model.'''
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
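# Usage sketch (run id and output dir are placeholders; assumes this module
# is saved as get_ci_error_statistics.py):
#
#     python get_ci_error_statistics.py --workflow_run_id 123456789 \
#         --output_dir ci_reports --token <GITHUB_TOKEN>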
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
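# Usage sketch: with the defaults above, the derived hidden size is the
# channel dimension of the last Swin stage:
#
#     config = DonutSwinConfig()
#     assert config.hidden_size == 96 * 2 ** 3  # == 768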
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Convert the rows of source_data into per-column lists."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalize every column to [0, 1]; weight 0 inverts the score."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f'Invalid weight of {weight:f} provided'
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele

    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Attach a percentual-proximity score to every row of source_data."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
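# Usage sketch (weight 1 rewards larger values, weight 0 rewards smaller
# ones; the sums below are hand-checked):
#
#     >>> procentual_proximity([[20, 60], [10, 70], [30, 50]], [0, 1])
#     [[20, 60, 1.0], [10, 70, 2.0], [30, 50, 0.0]]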
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number using a sieve."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count the hybrid-integers p^q * q^p (p, q distinct primes) <= base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
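# Sanity sketch: the Project Euler 800 statement gives C(800) = 10790 as the
# worked example, which this implementation should reproduce:
#
#     >>> solution(800, 800)
#     10790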
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :int = "MCTCTFeatureExtractor"
_UpperCAmelCase :Dict = "AutoTokenizer"
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = self.feature_extractor
lowercase__: Dict = False
def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase , **_UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
lowercase__: Union[str, Any] = kwargs.pop('''raw_speech''' )
else:
lowercase__: Union[str, Any] = kwargs.pop('''audio''' , _UpperCAmelCase )
lowercase__: Union[str, Any] = kwargs.pop('''sampling_rate''' , _UpperCAmelCase )
lowercase__: str = kwargs.pop('''text''' , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
lowercase__: Optional[Any] = args[0]
lowercase__: str = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
lowercase__: Dict = self.feature_extractor(_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None:
lowercase__: int = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase__: Optional[int] = encodings['''input_ids''']
return inputs
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: int = kwargs.pop('''input_features''' , _UpperCAmelCase )
lowercase__: Optional[Any] = kwargs.pop('''labels''' , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
lowercase__: Tuple = args[0]
lowercase__: Optional[int] = args[1:]
if input_features is not None:
lowercase__: Dict = self.feature_extractor.pad(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
if labels is not None:
lowercase__: Union[str, Any] = self.tokenizer.pad(_UpperCAmelCase , **_UpperCAmelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowercase__: Optional[Any] = labels['''input_ids''']
return input_features
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@contextmanager
def _snake_case ( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
lowercase__: Optional[int] = True
lowercase__: Dict = self.tokenizer
yield
lowercase__: Tuple = self.feature_extractor
lowercase__: List[str] = False
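# Usage sketch (the checkpoint id is illustrative; `waveform` and
# `transcription` stand in for a raw audio array and its label text):
#
#     processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#     inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#     labels = processor(text=transcription)["input_ids"]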
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__A = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
__A = 1_0
__A = 2_5_6
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[MinHash]:
if len(__UpperCAmelCase ) < MIN_NUM_TOKENS:
return None
lowercase__: Tuple = MinHash(num_perm=__UpperCAmelCase )
for token in set(__UpperCAmelCase ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Set[str]:
return {t for t in NON_ALPHA.split(__UpperCAmelCase ) if len(t.strip() ) > 0}
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , *,
_UpperCAmelCase = 0.85 , ):
lowercase__: Optional[int] = duplication_jaccard_threshold
lowercase__: str = NUM_PERM
lowercase__: Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowercase__: Optional[int] = defaultdict(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = self._index.query(_UpperCAmelCase )
if code_key in self._index.keys:
print(F"""Duplicate key {code_key}""" )
return
self._index.insert(_UpperCAmelCase , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_UpperCAmelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[Any] = []
for base, duplicates in self._duplicate_clusters.items():
lowercase__: Dict = [base] + list(_UpperCAmelCase )
# reformat the cluster to be a list of dict
lowercase__: Union[str, Any] = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(_UpperCAmelCase )
return duplicate_clusters
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: int = self.get_duplicate_clusters()
with open(_UpperCAmelCase , '''w''' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict:
lowercase__, lowercase__: Union[str, Any] = element
lowercase__: Any = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Union[str, Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__UpperCAmelCase , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowercase__: Optional[Any] = DuplicationIndex(duplication_jaccard_threshold=__UpperCAmelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__UpperCAmelCase ) ) , max_queue_size=1_0_0 ) ):
di.add(__UpperCAmelCase , __UpperCAmelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> float:
lowercase__: Optional[Any] = get_tokens(__UpperCAmelCase )
lowercase__: Optional[Any] = get_tokens(__UpperCAmelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
__A = None
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
lowercase__: Any = []
for elementa in cluster:
lowercase__: List[str] = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
lowercase__: Any = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(__UpperCAmelCase , __UpperCAmelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowercase__: int = 1
extremes.append(__UpperCAmelCase )
return extremes
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
global _shared_dataset
lowercase__: Optional[int] = dataset
lowercase__: Union[str, Any] = []
lowercase__: str = partial(_find_cluster_extremes_shared , jaccard_threshold=__UpperCAmelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__UpperCAmelCase , __UpperCAmelCase , ) , total=len(__UpperCAmelCase ) , ):
extremes_list.append(__UpperCAmelCase )
return extremes_list
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase = 0.8_5 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
lowercase__: Any = make_duplicate_clusters(__UpperCAmelCase , __UpperCAmelCase )
lowercase__: Union[str, Any] = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
lowercase__: List[str] = {}
lowercase__: int = find_extremes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for extremes in extremes_clusters:
for element in extremes:
lowercase__: str = element
lowercase__: List[str] = duplicate_indices - set(extreme_dict.keys() )
lowercase__: List[str] = dataset.filter(lambda __UpperCAmelCase , __UpperCAmelCase : idx not in remove_indices , with_indices=__UpperCAmelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowercase__: Optional[int] = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
lowercase__: Optional[int] = extreme_dict[element['''base_index''']]['''copies''']
print(F"""Original dataset size: {len(__UpperCAmelCase )}""" )
print(F"""Number of duplicate clusters: {len(__UpperCAmelCase )}""" )
print(F"""Files in duplicate cluster: {len(__UpperCAmelCase )}""" )
print(F"""Unique files in duplicate cluster: {len(__UpperCAmelCase )}""" )
print(F"""Filtered dataset size: {len(__UpperCAmelCase )}""" )
return ds_filter, duplicate_clusters
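# Usage sketch (expects a datasets.Dataset whose rows have "content",
# "repo_name" and "path" columns, as the helpers above assume):
#
#     ds_filter, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)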
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
'''simple docstring'''
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
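# Usage sketch (small hand-picked parameters; each row of the returned list
# is the highway state after one update step):
#
#     highway = construct_highway(20, frequency=4, initial_speed=3)
#     states = simulate(highway, number_of_update=10, probability=0.3, max_speed=5)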
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def create_rename_keys(config):
    rename_keys = []

    # stem
    # fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            # (target keys restored to match the attention naming used in the rename_keys above)
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes (target keys restored to match the DETA naming used elsewhere in this script)
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
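# Usage sketch (assuming this script is saved as convert_deta_swin_to_pytorch.py):
#
#     python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large --push_to_hub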
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
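# Usage sketch: the _LazyModule indirection above defers the heavy torch/TF
# submodule imports until an attribute is first touched, e.g.
#
#     from transformers.models.transfo_xl import TransfoXLConfig  # cheap
#     from transformers.models.transfo_xl import TransfoXLModel   # triggers the torch import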
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDistilBertModel,
            'fill-mask': TFDistilBertForMaskedLM,
            'question-answering': TFDistilBertForQuestionAnswering,
            'text-classification': TFDistilBertForSequenceClassification,
            'token-classification': TFDistilBertForTokenClassification,
            'zero-shot': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
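# Note (added, hedged): `from_pt=True` converts the PyTorch "roberta-base"
# checkpoint to Flax weights on the fly, so this slow test also exercises the
# PT -> Flax weight-conversion path, not just the forward pass.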
| 88
| 0
|
'''simple docstring'''
import math
import unittest


def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimization."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 have the form 6k +/- 1: 6k, 6k+2 and 6k+4 are even and
    # 6k+3 is divisible by 3, so only the remaining two residues can be prime.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
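
# Added sanity check (sketch, not part of the original tests): the 6k +/- 1
# sieve above should reproduce the primes below 30.
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
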
class TestIsPrime(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 44
|
# Functions to print a diamond: Floyd-style pyramid plus its mirror image.
def floyd(n: int) -> None:
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    """Print the full diamond, or a friendly message for n <= 0."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")

    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
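# Illustrative output for pretty_print(3) (added comment):
#   *
#  * *
# * * *
# * * *
#  * *
#   *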
| 220
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
    'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_gpt_neox_japanese'] = [
        'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTNeoXJapaneseForCausalLM',
        'GPTNeoXJapaneseLayer',
        'GPTNeoXJapaneseModel',
        'GPTNeoXJapanesePreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
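# Sketch of the lazy-import behavior this pattern provides (added comment;
# usage shown for illustration only):
#   from transformers.models import gpt_neox_japanese   # cheap: torch not imported yet
#   gpt_neox_japanese.GPTNeoXJapaneseConfig             # first attribute access runs the real import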
| 68
|
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR gate: returns 1 if at least one input is 1."""
    # Truth-table trick: count how many of the inputs equal 1.
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively checks the 2-input truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
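
# Added sketch (not in the original module; `or_gate_n` is a hypothetical
# helper): an n-input OR folds the 2-input gate above.
def or_gate_n(*inputs: int) -> int:
    result = 0
    for bit in inputs:
        result = or_gate(result, bit)
    return result


assert or_gate_n(0, 0, 0) == 0
assert or_gate_n(0, 1, 0) == 1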
| 68
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 105
|
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
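
# Added example (sketch): a small traceable case for the recursive search above.
# The branch that drops 10 yields [2, 11]; the branch that keeps the pivot 3
# yields [3, 10, 11]; the longer one is returned.
assert longest_subsequence([3, 10, 2, 11]) == [3, 10, 11]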
| 690
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 559
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 559
| 1
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 291
|
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore string search using the bad-character heuristic."""

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this alignment, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
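
# Added check (sketch): "AB" aligns at indices 0 and 3 of "ABAABA".
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]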
| 557
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 713
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A vector over the real numbers, backed by a list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension):
    # returns a zero vector of size 'dimension'
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension, pos):
    # returns a unit basis vector with a one at index 'pos' (zero-based)
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar, x, y):
    # computes the axpy operation: scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n, a, b):
    # returns a random vector of size n with integer components between 'a' and 'b'
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A simple w x h matrix over the real numbers."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactors = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactors)


def square_zero_matrix(n):
    # returns the square zero-matrix of dimension n x n
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width, height, a, b):
    # returns a random matrix with integer components between 'a' and 'b'
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
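
# Hedged usage sketch (added; not part of the original module):
if __name__ == "__main__":
    v = Vector([1, 2, 2])
    assert len(v) == 3
    assert abs(v.euclidean_length() - 3.0) < 1e-9  # sqrt(1 + 4 + 4) == 3
    identity = Matrix([[1, 0], [0, 1]], 2, 2)
    assert identity.determinant() == 1
    assert (identity * Vector([5, 7])).component(0) == 5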
| 416
| 0
|
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)  # noqa: E741
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # NOTE: `support_list` is read from module scope; the button handler below
    # sets it before this function is called.
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))


disclaimer = """
---

**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""

st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 508
|
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def __snake_case ( lowercase : Optional[int] , lowercase : Tuple=82 , lowercase : int=16 ):
set_seed(42 )
snake_case_ = RegressionModel()
snake_case_ = deepcopy(lowercase )
snake_case_ = RegressionDataset(length=lowercase )
snake_case_ = DataLoader(lowercase , batch_size=lowercase )
model.to(accelerator.device )
snake_case_ , snake_case_ = accelerator.prepare(lowercase , lowercase )
return model, ddp_model, dataloader
def __snake_case ( lowercase : Accelerator , lowercase : int=False ):
snake_case_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
snake_case_ = load_dataset("glue" , "mrpc" , split="validation" )
def tokenize_function(lowercase : Optional[Any] ):
snake_case_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
with accelerator.main_process_first():
snake_case_ = dataset.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
snake_case_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase : List[Any] ):
if use_longest:
return tokenizer.pad(lowercase , padding="longest" , return_tensors="pt" )
return tokenizer.pad(lowercase , padding="max_length" , max_length=128 , return_tensors="pt" )
return DataLoader(lowercase , shuffle=lowercase , collate_fn=lowercase , batch_size=16 )
def __snake_case ( lowercase : List[Any] , lowercase : str ):
snake_case_ = Accelerator(dispatch_batches=lowercase , split_batches=lowercase )
snake_case_ = get_dataloader(lowercase , not dispatch_batches )
snake_case_ = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" , return_dict=lowercase )
snake_case_ , snake_case_ = accelerator.prepare(lowercase , lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __snake_case ( lowercase : List[Any] , lowercase : str , lowercase : Tuple ):
snake_case_ = []
for batch in dataloader:
snake_case_ , snake_case_ = batch.values()
with torch.no_grad():
snake_case_ = model(lowercase )
snake_case_ , snake_case_ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
snake_case_ , snake_case_ = [], []
for logit, targ in logits_and_targets:
logits.append(lowercase )
targs.append(lowercase )
snake_case_ , snake_case_ = torch.cat(lowercase ), torch.cat(lowercase )
return logits, targs
def __snake_case ( lowercase : Accelerator , lowercase : Union[str, Any]=82 , lowercase : str=False , lowercase : List[Any]=False , lowercase : int=16 ):
snake_case_ , snake_case_ , snake_case_ = get_basic_setup(lowercase , lowercase , lowercase )
snake_case_ , snake_case_ = generate_predictions(lowercase , lowercase , lowercase )
assert (
len(lowercase ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase )}'''
def __snake_case ( lowercase : bool = False , lowercase : bool = False ):
snake_case_ = evaluate.load("glue" , "mrpc" )
snake_case_ , snake_case_ = get_mrpc_setup(lowercase , lowercase )
# First do baseline
snake_case_ , snake_case_ , snake_case_ = setup["no"]
model.to(lowercase )
model.eval()
for batch in dataloader:
batch.to(lowercase )
with torch.inference_mode():
snake_case_ = model(**lowercase )
snake_case_ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase , references=batch["labels"] )
snake_case_ = metric.compute()
# Then do distributed
snake_case_ , snake_case_ , snake_case_ = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
snake_case_ = model(**lowercase )
snake_case_ = outputs.logits.argmax(dim=-1 )
snake_case_ = batch["labels"]
snake_case_ , snake_case_ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase , references=lowercase )
snake_case_ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __snake_case ( ):
snake_case_ = Accelerator(split_batches=lowercase , dispatch_batches=lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(lowercase , lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
snake_case_ = Accelerator(split_batches=lowercase , dispatch_batches=lowercase )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(lowercase , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
snake_case_ = Accelerator()
test_torch_metrics(lowercase , 512 )
accelerator.state._reset_state()
def __snake_case ( lowercase : int ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
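# A minimal sketch of the pattern these tests exercise, assuming `accelerate` and
# `torch` are installed; `model` and `dataloader` are caller-supplied placeholders.
# `gather_for_metrics` trims the samples duplicated to pad the last uneven batch,
# so the concatenated output matches the true dataset length.
import torch
from accelerate import Accelerator

def gather_predictions(accelerator: Accelerator, model, dataloader):
    preds = []
    for batch in dataloader:
        with torch.inference_mode():
            logits = model(**batch).logits
        preds.append(accelerator.gather_for_metrics(logits.argmax(dim=-1)).cpu())
    return torch.cat(preds)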
| 508
| 1
|
import requests
from bs4 import BeautifulSoup
def A(__a: str , __a: dict ):
lowerCAmelCase_ = BeautifulSoup(requests.get(__a , params=__a ).content , "html.parser" )
lowerCAmelCase_ = soup.find("div" , attrs={"class": "gs_ri"} )
lowerCAmelCase_ = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
lowerCamelCase__ = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 20_18,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 226
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 226
| 1
|
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
SCREAMING_SNAKE_CASE_ = '''\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'''
SCREAMING_SNAKE_CASE_ = '''\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'''
SCREAMING_SNAKE_CASE_ = '''\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'''
SCREAMING_SNAKE_CASE_ = '''\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'''
SCREAMING_SNAKE_CASE_ = '''The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> Optional[Any]:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def __A ( self , snake_case_ , snake_case_ , snake_case_=[1, 10, 100] , snake_case_=4 , snake_case_=3.0 ) -> int:
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=_SCREAMING_SNAKE_CASE ) as executor:
_UpperCAmelCase = []
_UpperCAmelCase = Counter()
_UpperCAmelCase = 0
_UpperCAmelCase = defaultdict(_SCREAMING_SNAKE_CASE )
for task_id, (candidates, test_case) in enumerate(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
for candidate in candidates:
_UpperCAmelCase = candidate + '''\n''' + test_case
_UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
_UpperCAmelCase = executor.submit(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
futures.append(_SCREAMING_SNAKE_CASE )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
        _UpperCAmelCase , _UpperCAmelCase = [], []
for result in results.values():
result.sort()
_UpperCAmelCase = [r[1]['''passed'''] for r in result]
total.append(len(_SCREAMING_SNAKE_CASE ) )
correct.append(sum(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = np.array(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.array(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = k
_UpperCAmelCase = {F"""pass@{k}""": estimate_pass_at_k(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def A__ ( A__ , A__ , A__ ) -> int:
'''simple docstring'''
def estimator(A__ , A__ , A__ ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_lowercase , _lowercase ):
_UpperCAmelCase = itertools.repeat(_lowercase , len(_lowercase ) )
else:
assert len(_lowercase ) == len(_lowercase )
_UpperCAmelCase = iter(_lowercase )
return np.array([estimator(int(_lowercase ) , int(_lowercase ) , _lowercase ) for n, c in zip(_lowercase , _lowercase )] )
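# The estimator above is the unbiased pass@k from the Codex paper,
# pass@k = 1 - C(n - c, k) / C(n, k), evaluated as a numerically stable product.
# A small self-contained sanity check of that equivalence (illustrative only,
# not part of the metric):
from math import comb
import numpy as np

def _pass_at_k_check(n: int, c: int, k: int) -> None:
    product_form = 1.0 if n - c < k else 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
    closed_form = 1.0 - comb(n - c, k) / comb(n, k) if n - c >= k else 1.0
    assert abs(product_form - closed_form) < 1e-12

_pass_at_k_check(5, 2, 1)  # both forms give 0.4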
| 426
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__a = None
__a = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__a = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = None
# Automatically constructed
lowerCAmelCase = "PIL.Image.Image"
lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a )
def __call__( self ) -> Tuple:
return self.pa_type
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_SCREAMING_SNAKE_CASE )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Dict = path.split('''::''' )[-1]
try:
UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
UpperCAmelCase_ : Optional[Any] = None
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : List[str] = BytesIO(f.read() )
UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase_ : Dict = storage.field('''bytes''' )
else:
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase_ : int = storage.field('''path''' )
else:
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
UpperCAmelCase_ : Any = f.read()
return bytes_
UpperCAmelCase_ : Union[str, Any] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase_ : List[str] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def lowerCamelCase__ ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCAmelCase_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
UpperCAmelCase_ : int = image.format
else:
UpperCAmelCase_ : List[Any] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(_lowercase , format=_lowercase )
return buffer.getvalue()
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if hasattr(_lowercase , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
UpperCAmelCase_ : Tuple = array.dtype
UpperCAmelCase_ : List[str] = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
UpperCAmelCase_ : Dict = dtype.kind
UpperCAmelCase_ : Union[str, Any] = dtype.itemsize
UpperCAmelCase_ : Optional[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCAmelCase_ : Tuple = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCAmelCase_ : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCAmelCase_ : Union[str, Any] = dtype_byteorder + dtype_kind + str(_lowercase )
UpperCAmelCase_ : str = np.dtype(_lowercase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
UpperCAmelCase_ : Any = PIL.Image.fromarray(array.astype(_lowercase ) )
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
UpperCAmelCase_, UpperCAmelCase_ : Tuple = first_non_null_value(_lowercase )
if isinstance(_lowercase , _lowercase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_lowercase , np.ndarray ):
UpperCAmelCase_ : Any = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
elif isinstance(_lowercase , PIL.Image.Image ):
UpperCAmelCase_ : Union[str, Any] = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
else:
return objs
else:
return objs
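# Sketch of how encode_np_array's downcast-within-kind loop resolves a dtype
# (assuming a little-endian host): "<i8" (int64) is not in the allow-list, so the
# itemsize is halved to 4, "<i4" (int32) is found, and the array is saved with a warning.
import numpy as np

valid = {np.dtype("<i2"), np.dtype("<i4")}  # the signed-int subset of the allow-list above
itemsize = np.dtype(np.int64).itemsize  # 8
while np.dtype(f"<i{itemsize}") not in valid:
    itemsize //= 2
assert itemsize == 4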
| 30
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase ( unittest.TestCase ):
def __init__(self : List[Any] , A__ : Dict , A__ : str=1_3 , A__ : List[str]=7 , A__ : int=True , A__ : Tuple=True , A__ : str=True , A__ : List[str]=True , A__ : str=9_9 , A__ : str=3_2 , A__ : Tuple=5 , A__ : Dict=4 , A__ : Optional[Any]=3_7 , A__ : Dict="gelu" , A__ : Optional[int]=0.1 , A__ : Any=0.1 , A__ : Dict=5_1_2 , A__ : Any=1_6 , A__ : Tuple=2 , A__ : Optional[Any]=0.0_2 , A__ : Any=4 , ) -> str:
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_attention_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_choices
def UpperCAmelCase__ (self : Optional[Any] ) -> int:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_attention_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ (self : Tuple ) -> Tuple:
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ (self : List[Any] ) -> Tuple:
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = True
lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase ( _lowercase , unittest.TestCase ):
UpperCAmelCase : str = True
UpperCAmelCase : List[str] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ (self : Dict ) -> List[Any]:
lowercase = FlaxRobertaModelTester(self )
@slow
def UpperCAmelCase__ (self : Tuple ) -> Tuple:
for model_class_name in self.all_model_classes:
lowercase = model_class_name.from_pretrained("roberta-base" , from_pt=A__ )
lowercase = model(np.ones((1, 1) ) )
self.assertIsNotNone(A__ )
| 701
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Optional[Any] = '''MCTCTFeatureExtractor'''
UpperCAmelCase : Tuple = '''AutoTokenizer'''
def __init__(self : int , A__ : Tuple , A__ : Union[str, Any] ) -> Dict:
super().__init__(A__ , A__ )
lowercase = self.feature_extractor
lowercase = False
def __call__(self : Tuple , *A__ : str , **A__ : Dict ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A__ , **A__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowercase = kwargs.pop("raw_speech" )
else:
lowercase = kwargs.pop("audio" , A__ )
lowercase = kwargs.pop("sampling_rate" , A__ )
lowercase = kwargs.pop("text" , A__ )
if len(A__ ) > 0:
lowercase = args[0]
lowercase = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowercase = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ )
if text is not None:
lowercase = self.tokenizer(A__ , **A__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase = encodings["input_ids"]
return inputs
def UpperCAmelCase__ (self : Tuple , *A__ : str , **A__ : str ) -> str:
return self.tokenizer.batch_decode(*A__ , **A__ )
def UpperCAmelCase__ (self : Any , *A__ : List[Any] , **A__ : List[str] ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*A__ , **A__ )
lowercase = kwargs.pop("input_features" , A__ )
lowercase = kwargs.pop("labels" , A__ )
if len(A__ ) > 0:
lowercase = args[0]
lowercase = args[1:]
if input_features is not None:
lowercase = self.feature_extractor.pad(A__ , *A__ , **A__ )
if labels is not None:
lowercase = self.tokenizer.pad(A__ , **A__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowercase = labels["input_ids"]
return input_features
def UpperCAmelCase__ (self : Tuple , *A__ : Optional[int] , **A__ : Optional[int] ) -> Tuple:
return self.tokenizer.decode(*A__ , **A__ )
@contextmanager
def UpperCAmelCase__ (self : Optional[Any] ) -> Union[str, Any]:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
lowercase = True
lowercase = self.tokenizer
yield
lowercase = self.feature_extractor
lowercase = False
| 459
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
_lowerCamelCase = None
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
_lowerCamelCase = '▁'
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = BigBirdTokenizer
lowerCamelCase_ = ["input_ids", "attention_mask"]
lowerCamelCase_ = []
def __init__( self :str , __A :Tuple=None , __A :Optional[Any]=None , __A :Any="<unk>" , __A :Any="<s>" , __A :Union[str, Any]="</s>" , __A :List[str]="<pad>" , __A :Optional[Any]="[SEP]" , __A :str="[MASK]" , __A :Optional[Any]="[CLS]" , **__A :Union[str, Any] , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
__A , tokenizer_file=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = False if not self.vocab_file else True
def _snake_case ( self :List[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _snake_case ( self :Optional[int] , __A :List[int] , __A :Optional[List[int]] = None , __A :bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
def _snake_case ( self :Optional[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self :int , __A :str , __A :Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
return (out_vocab_file,)
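# Layout produced by the two build methods above, sketched with placeholder ids
# (0 = [CLS], 1 = [SEP] are illustrative values, not BigBird's real vocabulary ids):
cls, sep = [0], [1]
seq_a, seq_b = [7, 8, 9], [4, 5]
pair = cls + seq_a + sep + seq_b + sep           # [CLS] A [SEP] B [SEP]
token_type_ids = [0] * len(cls + seq_a + sep) + [1] * len(seq_b + sep)
assert len(token_type_ids) == len(pair)          # one segment id per token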
| 6
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
a_ : List[Any] = logging.get_logger(__name__)
def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
lowerCamelCase_ =os.path.abspath(__snake_case )
logger.info(F'''Loading PyTorch weights from {pt_path}''' )
lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' )
logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case )
return flax_state_dict
def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool:
return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ):
lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ):
lowerCamelCase_ =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowerCamelCase_ =None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowerCamelCase_ =pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowerCamelCase_ =pt_tuple_key[-2] + '''_v'''
if name is not None:
lowerCamelCase_ =pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
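# Why the conv branch above transposes with (2, 3, 1, 0): PyTorch stores conv
# kernels as (out_channels, in_channels, kh, kw) while Flax expects
# (kh, kw, in_channels, out_channels). A quick shape check of that mapping:
import numpy as np

pt_kernel = np.zeros((64, 3, 7, 7))              # (out, in, kh, kw)
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)    # (kh, kw, in, out)
assert flax_kernel.shape == (7, 7, 3, 64)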
def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str:
"""simple docstring"""
# convert pytorch tensor to numpy
lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase_ =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCamelCase_ =flax_model.params['''params''']
else:
lowerCamelCase_ =flax_model.params
lowerCamelCase_ =flatten_dict(__snake_case )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__snake_case )
lowerCamelCase_ ={}
lowerCamelCase_ =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase_ =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCamelCase_ =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
import torch
# Load the index
lowerCamelCase_ ={}
for shard_file in shard_filenames:
# load using msgpack utils
lowerCamelCase_ =torch.load(__snake_case )
lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase_ =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase_ =flax_model.params['''params''']
lowerCamelCase_ =flatten_dict(__snake_case )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
lowerCamelCase_ =flax_model.params
lowerCamelCase_ =flatten_dict(__snake_case )
lowerCamelCase_ =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase_ =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCamelCase_ =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
if "var" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ =os.path.abspath(__snake_case )
logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__snake_case , '''rb''' ) as state_f:
try:
lowerCamelCase_ =from_bytes(__snake_case , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(__snake_case , __snake_case )
def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , __snake_case ) ).values()
if any(__snake_case ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
        lowerCamelCase_ =jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , __snake_case )
lowerCamelCase_ =flatten_dict(__snake_case )
lowerCamelCase_ =pt_model.state_dict()
lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCamelCase_ =[]
lowerCamelCase_ =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix
lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict:
# conv layer
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict:
# linear layer
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
lowerCamelCase_ =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCamelCase_ ='''.'''.join(__snake_case )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCamelCase_ ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCamelCase_ =key.split('''.''' )
lowerCamelCase_ =None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCamelCase_ =key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCamelCase_ =key_components[-2] + '''_v'''
if name is not None:
lowerCamelCase_ =key_components[:-3] + [name]
lowerCamelCase_ ='''.'''.join(__snake_case )
lowerCamelCase_ =key
if flax_key in special_pt_names:
lowerCamelCase_ =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor
lowerCamelCase_ =torch.from_numpy(__snake_case )
# remove from missing keys
missing_keys.remove(__snake_case )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__snake_case )
pt_model.load_state_dict(__snake_case )
# re-transform missing_keys to list
lowerCamelCase_ =list(__snake_case )
if len(__snake_case ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(__snake_case ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
''' use it for predictions and inference.''' )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
| 676
| 0
|
from __future__ import annotations
def _snake_case( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE__ ) < k or k < 0:
raise ValueError('Invalid Input' )
A__ = A__ = sum(array[:k] )
for i in range(len(SCREAMING_SNAKE_CASE__ ) - k ):
A__ = current_sum - array[i] + array[i + k]
A__ = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowercase_ = [randint(-1000, 1000) for i in range(100)]
lowercase_ = randint(0, 110)
print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 586
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
if got_ver is None or want_ver is None:
raise ValueError(
f'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'
f' reinstalling {pkg}.' )
if not ops[op](version.parse(SCREAMING_SNAKE_CASE__ ) , version.parse(SCREAMING_SNAKE_CASE__ ) ):
raise ImportError(
f'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}' )
def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> None:
'''simple docstring'''
A__ = f'\n{hint}' if hint is not None else ''
# non-versioned check
if re.match(R'^[\w_\-\d]+$' , SCREAMING_SNAKE_CASE__ ):
A__ , A__ , A__ = requirement, None, None
else:
A__ = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , SCREAMING_SNAKE_CASE__ )
if not match:
raise ValueError(
                'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'
f' got {requirement}' )
A__ , A__ = match[0]
A__ = want_full.split(',' ) # there could be multiple requirements
A__ = {}
for w in want_range:
A__ = re.findall(R'^([\s!=<>]{1,2})(.+)' , SCREAMING_SNAKE_CASE__ )
if not match:
raise ValueError(
                    'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'
f' but got {requirement}' )
A__ , A__ = match[0]
A__ = want_ver
if op not in ops:
raise ValueError(f'{requirement}: need one of {list(ops.keys() )}, but got {op}' )
# special case
if pkg == "python":
        A__ = '.'.join([str(x) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return
# check if any version is installed
try:
A__ = importlib.metadata.version(SCREAMING_SNAKE_CASE__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'The \'{requirement}\' distribution was not found and is required by this application. {hint}' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : Dict ) -> Dict:
'''simple docstring'''
A__ = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
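# A compact alternative sketch of the same version gate using packaging's
# Requirement parser instead of the regex above (not the transformers API;
# the helper name is illustrative):
import importlib.metadata
from packaging.requirements import Requirement
from packaging.version import parse

def check_requirement(req_str: str) -> None:
    req = Requirement(req_str)
    got = parse(importlib.metadata.version(req.name))
    if not req.specifier.contains(got, prereleases=True):
        raise ImportError(f"{req_str} is required, found {req.name}=={got}")

check_requirement("packaging>=20.0")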
| 586
| 1
|
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] ):
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
SCREAMING_SNAKE_CASE__ = mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ = max(
mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , j - wt[i - 1] ) + val[i - 1] , )
SCREAMING_SNAKE_CASE__ = val
return f[i][j]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
SCREAMING_SNAKE_CASE__ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
SCREAMING_SNAKE_CASE__ = dp[i - 1][w_]
    return dp[n][w], dp  # use w directly; w_ is only defined after the loop has run
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: list , UpperCamelCase__: list ):
if not (isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(UpperCamelCase__ , (list, tuple) )):
raise ValueError(
"""Both the weights and values vectors must be either lists or tuples""" )
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ )
if num_items != len(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = (
"""The number of weights must be the same as the number of values.\n"""
f'''But got {num_items} weights and {len(UpperCamelCase__ )} values'''
)
raise ValueError(UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
if not isinstance(wt[i] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = (
"""All weights must be integers but got weight of """
f'''type {type(wt[i] )} at index {i}'''
)
raise TypeError(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = set()
_construct_solution(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return optimal_val, example_optional_set
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list , UpperCamelCase__: list , UpperCamelCase__: int , UpperCamelCase__: int , UpperCamelCase__: set ):
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , UpperCamelCase__ , UpperCamelCase__ )
else:
optimal_set.add(UpperCamelCase__ )
_construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , j - wt[i - 1] , UpperCamelCase__ )
if __name__ == "__main__":
_lowerCamelCase = [3, 2, 4, 4]
_lowerCamelCase = [4, 3, 2, 3]
_lowerCamelCase = 4
_lowerCamelCase = 6
_lowerCamelCase = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
_lowerCamelCase , _lowerCamelCase = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
_lowerCamelCase , _lowerCamelCase = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
| 6
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Union[str, Any] = '''sew-d'''
def __init__(self : Tuple , A__ : str=3_2 , A__ : str=7_6_8 , A__ : Union[str, Any]=1_2 , A__ : List[str]=1_2 , A__ : List[Any]=3_0_7_2 , A__ : List[Any]=2 , A__ : Dict=5_1_2 , A__ : Any=2_5_6 , A__ : str=True , A__ : Dict=True , A__ : str=("p2c", "c2p") , A__ : Optional[int]="layer_norm" , A__ : Optional[int]="gelu_python" , A__ : List[Any]=0.1 , A__ : List[str]=0.1 , A__ : Optional[Any]=0.1 , A__ : Optional[Any]=0.0 , A__ : str=0.1 , A__ : Optional[int]=0.0_2 , A__ : Union[str, Any]=1e-7 , A__ : List[str]=1e-5 , A__ : Union[str, Any]="group" , A__ : List[Any]="gelu" , A__ : Dict=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , A__ : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A__ : Optional[Any]=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A__ : Optional[Any]=False , A__ : List[str]=1_2_8 , A__ : int=1_6 , A__ : List[str]=True , A__ : Dict=0.0_5 , A__ : Any=1_0 , A__ : str=2 , A__ : Optional[int]=0.0 , A__ : str=1_0 , A__ : List[Any]=0 , A__ : Tuple="mean" , A__ : Union[str, Any]=False , A__ : Optional[Any]=False , A__ : Optional[int]=2_5_6 , A__ : Dict=0 , A__ : List[str]=1 , A__ : str=2 , **A__ : Tuple , ) -> List[Any]:
super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ )
lowercase = hidden_size
lowercase = feat_extract_norm
lowercase = feat_extract_activation
lowercase = list(A__ )
lowercase = list(A__ )
lowercase = list(A__ )
lowercase = conv_bias
lowercase = num_conv_pos_embeddings
lowercase = num_conv_pos_embedding_groups
lowercase = len(self.conv_dim )
lowercase = num_hidden_layers
lowercase = intermediate_size
lowercase = squeeze_factor
lowercase = max_position_embeddings
lowercase = position_buckets
lowercase = share_att_key
lowercase = relative_attention
lowercase = norm_rel_ebd
lowercase = list(A__ )
lowercase = hidden_act
lowercase = num_attention_heads
lowercase = hidden_dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = feat_proj_dropout
lowercase = final_dropout
lowercase = layer_norm_eps
lowercase = feature_layer_norm_eps
lowercase = initializer_range
lowercase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase = apply_spec_augment
lowercase = mask_time_prob
lowercase = mask_time_length
lowercase = mask_time_min_masks
lowercase = mask_feature_prob
lowercase = mask_feature_length
lowercase = mask_feature_min_masks
# ctc loss
lowercase = ctc_loss_reduction
lowercase = ctc_zero_infinity
# sequence classification
lowercase = use_weighted_layer_sum
lowercase = classifier_proj_size
@property
def UpperCAmelCase__ (self : str ) -> Optional[int]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
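# Illustration (hedged note, not part of the config class): the property above
# multiplies the feature extractor's conv strides, i.e. the total
# waveform-to-frame downsampling factor. With the default strides this is
# 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320 input samples per output frame.
assert functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320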
| 310
| 0
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
__lowerCAmelCase : Optional[Any] ='.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCAmelCase : Union[str, Any] =[
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Any = SavedModel()
__SCREAMING_SNAKE_CASE : List[str] = []
with open(os.path.join(lowercase__ , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f:
__SCREAMING_SNAKE_CASE : str = json.load(lowercase__ )['''opsets''']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowercase__ )] )
with open(lowercase__ , '''rb''' ) as f:
saved_model.ParseFromString(f.read() )
__SCREAMING_SNAKE_CASE : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
__SCREAMING_SNAKE_CASE : Optional[int] = sorted(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowercase__ )
if strict and len(lowercase__ ) > 0:
        raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops) )
elif len(lowercase__ ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*lowercase__ , sep='''\n''' )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
__lowerCAmelCase : Optional[Any] =parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
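# Example invocation (hedged sketch; the script path and model path are
# placeholders, not taken from this file):
#   python check_tf_ops.py --saved_model_path ./my_model/saved_model.pb --opset 12 --strict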
| 260
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (EulerDiscreteScheduler,)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 10
def __magic_name__( self :Dict , **lowerCAmelCase__ :Any ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1_100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCAmelCase__ )
return config
def __magic_name__( self :str ) -> Optional[Any]:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def __magic_name__( self :str ) -> List[str]:
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> Any:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> List[str]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> int:
__SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE : Dict = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_model()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE : Any = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def __magic_name__( self :Union[str, Any] ) -> int:
__SCREAMING_SNAKE_CASE : Tuple = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE : Dict = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.prev_sample
__SCREAMING_SNAKE_CASE : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def __magic_name__( self :Optional[int] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Any = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = self.dummy_model()
__SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCAmelCase__ )
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
__SCREAMING_SNAKE_CASE : Dict = torch.sum(torch.abs(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def __magic_name__( self :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCAmelCase__ )
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE : Any = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = output.prev_sample
__SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
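# Minimal denoising-loop sketch (hedged; mirrors the structure exercised by the
# tests above, with a zero tensor standing in for a real denoising model):
#   scheduler = EulerDiscreteScheduler()
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = torch.zeros_like(model_input)  # stand-in model output
#       sample = scheduler.step(noise_pred, t, sample).prev_sample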
| 260
| 1
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class A :
def __init__( self , SCREAMING_SNAKE_CASE = "cpu" , SCREAMING_SNAKE_CASE = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
A : Optional[int] = device
A : List[Any] = CLIPTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE )
A : Optional[Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
A : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
A : Tuple = torchvision.transforms.Normalize(self.image_mean , self.image_std )
A : List[str] = torchvision.transforms.Resize(224 )
A : List[Any] = torchvision.transforms.CenterCrop(224 )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : str = self.resize(SCREAMING_SNAKE_CASE )
A : Optional[int] = self.center_crop(SCREAMING_SNAKE_CASE )
A : Optional[int] = self.normalize(SCREAMING_SNAKE_CASE )
return images
def __call__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : str = self.tokenizer(text=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : str = self.preprocess_img(SCREAMING_SNAKE_CASE )
A : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class A ( nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.01 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="image" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , ) -> None:
"""simple docstring"""
super().__init__()
A : List[Any] = None
A : Tuple = device if device else get_device()
if vqgan:
A : Optional[int] = vqgan
else:
A : str = load_vqgan(self.device , conf_path=SCREAMING_SNAKE_CASE , ckpt_path=SCREAMING_SNAKE_CASE )
self.vqgan.eval()
if clip:
A : int = clip
else:
A : Tuple = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
A : Any = ProcessorGradientFlow(device=self.device )
A : str = iterations
A : List[Any] = lr
A : Optional[int] = log
A : str = make_grid
A : Any = return_val
A : int = quantize
A : List[str] = self.vqgan.decoder.z_shape
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=True ) -> Dict:
"""simple docstring"""
A : Any = []
if output_path is None:
A : Tuple = '''./animation.gif'''
if input_path is None:
A : Tuple = self.save_path
A : Union[str, Any] = sorted(glob(input_path + '''/*''' ) )
if not len(SCREAMING_SNAKE_CASE ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(SCREAMING_SNAKE_CASE ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
A : Optional[Any] = total_duration / len(SCREAMING_SNAKE_CASE )
A : str = [frame_duration] * len(SCREAMING_SNAKE_CASE )
if extend_frames:
A : List[str] = 1.5
A : List[Any] = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(SCREAMING_SNAKE_CASE ) )
imageio.mimsave(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , duration=SCREAMING_SNAKE_CASE )
print(F'gif saved to {output_path}' )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Optional[Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
A : Optional[int] = preprocess(Image.open(SCREAMING_SNAKE_CASE ) , target_image_size=256 ).to(self.device )
A : Tuple = preprocess_vqgan(SCREAMING_SNAKE_CASE )
A, *A : int = self.vqgan.encode(SCREAMING_SNAKE_CASE )
return z
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : Any = self.latent.detach().requires_grad_()
A : Dict = base_latent + transform_vector
if self.quantize:
A, *A : Dict = self.vqgan.quantize(SCREAMING_SNAKE_CASE )
else:
A : str = trans_latent
return self.vqgan.decode(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> List[str]:
"""simple docstring"""
A : Tuple = self.clip_preprocessor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE )
A : Tuple = self.clip(**SCREAMING_SNAKE_CASE )
A : Optional[Any] = clip_outputs.logits_per_image
if weights is not None:
A : Tuple = similarity_logits * weights
return similarity_logits.sum()
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : Any = self._get_clip_similarity(pos_prompts['''prompts'''] , SCREAMING_SNAKE_CASE , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
A : Dict = self._get_clip_similarity(neg_prompts['''prompts'''] , SCREAMING_SNAKE_CASE , weights=neg_prompts['''weights'''] )
else:
A : List[str] = torch.tensor([1] , device=self.device )
A : Optional[int] = -torch.log(SCREAMING_SNAKE_CASE ) + torch.log(SCREAMING_SNAKE_CASE )
return loss
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : Dict = torch.randn_like(self.latent , requires_grad=SCREAMING_SNAKE_CASE , device=self.device )
A : List[Any] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
A : Optional[int] = self._add_vector(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = loop_post_process(SCREAMING_SNAKE_CASE )
A : Tuple = self._get_CLIP_loss(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print('''CLIP loss''' , SCREAMING_SNAKE_CASE )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=SCREAMING_SNAKE_CASE )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
wandb.init(reinit=SCREAMING_SNAKE_CASE , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
A : Optional[Any] = Image.open(SCREAMING_SNAKE_CASE )
A : List[str] = image.resize((256, 256) )
            wandb.log({'''Original Image''': wandb.Image(SCREAMING_SNAKE_CASE )} )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
if not prompts:
return []
A : int = []
A : Optional[int] = []
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Union[str, Any] = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(SCREAMING_SNAKE_CASE , (tuple, list) ):
A : Union[str, Any] = prompt[0]
A : Any = float(prompt[1] )
elif ":" in prompt:
A, A : Union[str, Any] = prompt.split(''':''' )
A : Optional[Any] = float(SCREAMING_SNAKE_CASE )
else:
A : Dict = prompt
A : int = 1.0
processed_prompts.append(SCREAMING_SNAKE_CASE )
weights.append(SCREAMING_SNAKE_CASE )
return {
"prompts": processed_prompts,
"weights": torch.tensor(SCREAMING_SNAKE_CASE , device=self.device ),
}
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> int:
"""simple docstring"""
if image_path:
A : List[str] = self._get_latent(SCREAMING_SNAKE_CASE )
else:
A : List[Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert pos_prompts, "You must provide at least one positive prompt."
A : List[Any] = self.process_prompts(SCREAMING_SNAKE_CASE )
A : List[Any] = self.process_prompts(SCREAMING_SNAKE_CASE )
if save_final and save_path is None:
A : List[str] = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(SCREAMING_SNAKE_CASE ):
os.makedirs(SCREAMING_SNAKE_CASE )
else:
A : Union[str, Any] = save_path + '''_''' + get_timestamp()
os.makedirs(SCREAMING_SNAKE_CASE )
A : str = save_path
A : Dict = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(SCREAMING_SNAKE_CASE ) )
A : Any = loop_post_process(SCREAMING_SNAKE_CASE )
for iter, transformed_img in enumerate(self._optimize_CLIP(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ):
if show_intermediate:
show_pil(SCREAMING_SNAKE_CASE )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(SCREAMING_SNAKE_CASE )} )
if show_final:
show_pil(SCREAMING_SNAKE_CASE )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
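# Hedged usage sketch (the editor class is mangled to `A` above and its method
# names are likewise mangled; the last method defined above appears to be a
# `generate(pos_prompts, ...)`-style entry point, and the prompt/path values
# here are placeholders):
#   editor = A(iterations=25, lr=0.05, log=False)
#   for frame in editor.generate("a smiling face", image_path="face.png"):
#       frame.save("latest.png")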
| 634
|
def _lowerCAmelCase ( A__ = 50_000_000 ):
lowercase__ = set()
lowercase__ = int((limit - 24) ** (1 / 2) )
lowercase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , A__ ) ) )
for primea in primes:
lowercase__ = primea * primea
for primea in primes:
lowercase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
lowercase__ = primea * primea * primea * primea
lowercase__ = square + cube + tetr
if total >= limit:
break
ret.add(A__ )
return len(A__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
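# Worked example (hedged): the smallest number expressible as p**2 + q**3 + r**4
# with p, q, r prime is 28 = 2**2 + 2**3 + 2**4, so `solution(29)` should be 1.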
| 622
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Any = use_cache
_UpperCamelCase : Any = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
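# Illustration (hedged, not part of the class): for any task other than
# multiple-choice, the property above yields dynamic batch/sequence axes for
# each input tensor, e.g.:
#   OrderedDict([
#       ("input_ids",      {0: "batch", 1: "sequence"}),
#       ("attention_mask", {0: "batch", 1: "sequence"}),
#       ("token_type_ids", {0: "batch", 1: "sequence"}),
#   ])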
| 648
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : Optional[Any] = embeddings_size
_UpperCamelCase : Tuple = hidden_sizes
_UpperCamelCase : Dict = depths
_UpperCamelCase : str = is_training
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : str = hidden_act
_UpperCamelCase : Optional[int] = num_labels
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Tuple = len(__snake_case)
_UpperCamelCase : Dict = out_features
_UpperCamelCase : Union[str, Any] = out_indices
_UpperCamelCase : int = num_groups
def A__ ( self):
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : str = None
if self.use_labels:
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels)
_UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = BitModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = self.num_labels
_UpperCamelCase : Dict = BitForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
_UpperCamelCase : Any = None
_UpperCamelCase : str = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def A__ ( self):
_UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Dict = BitModelTester(self)
_UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case)
def A__ ( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
return
@unittest.skip(reason='Bit does not output attentions')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(__snake_case)
_UpperCamelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(config=__snake_case)
for name, module in model.named_modules():
            if isinstance(__snake_case , (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : str = self.model_tester.num_stages
self.assertEqual(len(__snake_case) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase : Any = layer_type
_UpperCamelCase : Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[str] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
@unittest.skip(reason='Bit does not use feedforward chunking')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def lowerCamelCase_ ( ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def A__ ( self):
_UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case)
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@require_torch
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def A__ ( self):
_UpperCamelCase : List[str] = BitModelTester(self)
| 648
| 1
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = CLIPTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = CLIPTokenizerFast
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Tuple = {}
SCREAMING_SNAKE_CASE_ : str = False
def __UpperCAmelCase ( self : Optional[int] ) -> int:
super().setUp()
# fmt: off
_lowercase = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_lowercase = dict(zip(__A ,range(len(__A ) ) ) )
_lowercase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
_lowercase = {'unk_token': '<unk>'}
_lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
def __UpperCAmelCase ( self : Tuple ,**__A : Tuple ) -> str:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__A )
def __UpperCAmelCase ( self : Union[str, Any] ,**__A : List[str] ) -> Dict:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__A )
def __UpperCAmelCase ( self : List[str] ,__A : Any ) -> Optional[Any]:
_lowercase = 'lower newer'
_lowercase = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self : Dict ) -> List[str]:
_lowercase = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_lowercase = 'lower newer'
_lowercase = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
_lowercase = tokenizer.tokenize(__A )
self.assertListEqual(__A ,__A )
_lowercase = tokens + [tokenizer.unk_token]
_lowercase = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) ,__A )
@require_ftfy
def __UpperCAmelCase ( self : int ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase = self.tokenizer_class.from_pretrained(__A ,**__A )
_lowercase = self.rust_tokenizer_class.from_pretrained(__A ,**__A )
_lowercase = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
_lowercase = tokenizer_s.tokenize(__A )
_lowercase = tokenizer_r.tokenize(__A )
self.assertListEqual(__A ,__A )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_lowercase = 'xa\u0303y' + ' ' + 'x\xe3y'
_lowercase = tokenizer_s.tokenize(__A )
_lowercase = tokenizer_r.tokenize(__A )
self.assertListEqual(__A ,__A )
# Test that the tokenization is identical on unicode of space type
_lowercase = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
                '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_lowercase = tokenizer_s.tokenize(__A )
_lowercase = tokenizer_r.tokenize(__A )
self.assertListEqual(__A ,__A )
# Test that the tokenization is identical on unicode of line break type
_lowercase = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_lowercase = tokenizer_s.tokenize(__A )
_lowercase = tokenizer_r.tokenize(__A )
self.assertListEqual(__A ,__A )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_lowercase = F"""{text_of_1_token} {text_of_1_token}"""
_lowercase = self.rust_tokenizer_class.from_pretrained(
__A ,use_fast=__A ,)
_lowercase = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(__A ) + 1, len(__A ) + 1 + len(__A )) ,)
_lowercase = F""" {text}"""
_lowercase = self.rust_tokenizer_class.from_pretrained(
__A ,use_fast=__A ,)
_lowercase = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )) ,)
def __UpperCAmelCase ( self : int ) -> Any:
# Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(__A ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def __UpperCAmelCase ( self : List[Any] ) -> str:
super().test_tokenization_python_rust_equals()
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
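# Toy BPE illustration (hedged commentary on the fixtures built in setUp): with
# the merges "l o", "lo w</w>" and "e r</w>", the word "lower" tokenizes to
# ["lo", "w", "er</w>"], while unseen suffixes fall back to single characters,
# as the first test above asserts.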
| 67
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCamelCase__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class UpperCAmelCase__ ( datasets.BuilderConfig ):
'''simple docstring'''
UpperCAmelCase_ = None
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , ) -> List[str]:
'''simple docstring'''
import pyspark
def generate_fn():
_lowercase : Any = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
_lowercase : int = df_with_partition_id.select('''*''' ).where(F'part_id = {partition_id}' ).drop('''part_id''' )
_lowercase : Union[str, Any] = partition_df.collect()
_lowercase : int = 0
for row in rows:
yield F'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class UpperCAmelCase__ ( _BaseExamplesIterable ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase : "pyspark.sql.DataFrame" , UpperCamelCase : List[str]=None , ):
"""simple docstring"""
_lowercase : List[Any] = df
_lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() )
_lowercase : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
"""simple docstring"""
yield from self.generate_examples_fn()
def lowerCAmelCase_ ( self : Tuple , UpperCamelCase : np.random.Generator ):
"""simple docstring"""
_lowercase : Dict = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCamelCase )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase )
def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
_lowercase : List[Any] = self.split_shard_indices_by_worker(UpperCamelCase , UpperCamelCase )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase )
@property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
return len(self.partition_order )
class UpperCAmelCase__ ( datasets.DatasetBuilder ):
'''simple docstring'''
UpperCAmelCase_ = SparkConfig
def __init__( self : Optional[int] , UpperCamelCase : "pyspark.sql.DataFrame" , UpperCamelCase : str = None , UpperCamelCase : str = None , **UpperCamelCase : Union[str, Any] , ):
"""simple docstring"""
import pyspark
_lowercase : List[str] = pyspark.sql.SparkSession.builder.getOrCreate()
_lowercase : Any = df
_lowercase : Dict = working_dir
super().__init__(
cache_dir=UpperCamelCase , config_name=str(self.df.semanticHash() ) , **UpperCamelCase , )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
def create_cache_and_write_probe(UpperCamelCase : Tuple ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=UpperCamelCase )
            _lowercase : Tuple = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCamelCase , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_lowercase : Any = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCamelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase_ ( self : str , UpperCamelCase : datasets.download.download_manager.DownloadManager ):
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCAmelCase_ ( self : str , UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
import pyspark
def get_arrow_batch_size(UpperCamelCase : int ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
_lowercase : int = self.df.count()
_lowercase : Optional[Any] = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_lowercase : List[Any] = (
self.df.limit(UpperCamelCase )
.repartition(1 )
.mapInArrow(UpperCamelCase , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_lowercase : int = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_lowercase : List[Any] = min(UpperCamelCase , int(approx_total_size / max_shard_size ) )
_lowercase : Optional[int] = self.df.repartition(UpperCamelCase )
def lowerCAmelCase_ ( self : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : int , ):
"""simple docstring"""
import pyspark
_lowercase : Optional[Any] = ParquetWriter if file_format == '''parquet''' else ArrowWriter
_lowercase : List[str] = os.path.join(self._working_dir , os.path.basename(UpperCamelCase ) ) if self._working_dir else fpath
_lowercase : str = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_lowercase : Tuple = self.config.features
_lowercase : Tuple = self._writer_batch_size
_lowercase : Optional[Any] = self._fs.storage_options
def write_arrow(UpperCamelCase : Dict ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_lowercase : str = pyspark.TaskContext().taskAttemptId()
_lowercase : List[Any] = next(UpperCamelCase , UpperCamelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
_lowercase : Dict = 0
_lowercase : Optional[int] = writer_class(
features=UpperCamelCase , path=working_fpath.replace('''SSSSS''' , F'{shard_id:05d}' ).replace('''TTTTT''' , F'{task_id:05d}' ) , writer_batch_size=UpperCamelCase , storage_options=UpperCamelCase , embed_local_files=UpperCamelCase , )
_lowercase : int = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCamelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowercase , _lowercase : int = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
_lowercase : List[str] = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , F'{shard_id:05d}' ).replace('''TTTTT''' , F'{task_id:05d}' ) , writer_batch_size=UpperCamelCase , storage_options=UpperCamelCase , embed_local_files=UpperCamelCase , )
_lowercase : List[str] = pa.Table.from_batches([batch] )
writer.write_table(UpperCamelCase )
if writer._num_bytes > 0:
_lowercase , _lowercase : List[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCamelCase ) ):
_lowercase : List[str] = os.path.join(os.path.dirname(UpperCamelCase ) , os.path.basename(UpperCamelCase ) )
shutil.move(UpperCamelCase , UpperCamelCase )
_lowercase : int = (
self.df.mapInArrow(UpperCamelCase , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCAmelCase_ ( self : Dict , UpperCamelCase : "datasets.SplitGenerator" , UpperCamelCase : str = "arrow" , UpperCamelCase : Optional[Union[str, int]] = None , UpperCamelCase : Optional[int] = None , **UpperCamelCase : Union[str, Any] , ):
"""simple docstring"""
self._validate_cache_dir()
_lowercase : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCamelCase )
_lowercase : Optional[Any] = not is_remote_filesystem(self._fs )
_lowercase : List[Any] = os.path.join if is_local else posixpath.join
_lowercase : Optional[int] = '''-TTTTT-SSSSS-of-NNNNN'''
_lowercase : str = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
_lowercase : List[str] = path_join(self._output_dir , UpperCamelCase )
_lowercase : Optional[Any] = 0
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : int = []
_lowercase : Union[str, Any] = []
for task_id, content in self._prepare_split_single(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
            _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCamelCase )
_lowercase : Optional[Any] = total_num_examples
_lowercase : int = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
_lowercase : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_lowercase : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , ):
rename(
UpperCamelCase , fpath.replace('''SSSSS''' , F'{shard_id:05d}' ).replace('''TTTTT''' , F'{task_id:05d}' ) , fpath.replace('''TTTTT-SSSSS''' , F'{global_shard_id:05d}' ).replace('''NNNNN''' , F'{total_shards:05d}' ) , )
_lowercase : List[Any] = []
_lowercase : int = 0
for i in range(len(UpperCamelCase ) ):
_lowercase , _lowercase : Optional[Any] = task_id_and_num_shards[i]
for shard_id in range(UpperCamelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCamelCase , len(UpperCamelCase ) ).map(lambda UpperCamelCase : _rename_shard(*UpperCamelCase ) ).collect()
else:
# don't use any pattern
_lowercase : Optional[int] = 0
_lowercase : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , F'{shard_id:05d}' ).replace('''TTTTT''' , F'{task_id:05d}' ) , fpath.replace(UpperCamelCase , '''''' ) , )
def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase : "datasets.SplitGenerator" , ):
"""simple docstring"""
return SparkExamplesIterable(self.df )
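# Hedged usage sketch (assumes a live Spark session; `Dataset.from_spark` is
# the public entry point that ultimately drives this builder):
#   from datasets import Dataset
#   spark = pyspark.sql.SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = Dataset.from_spark(df)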
| 322
| 0
|
from __future__ import annotations
def lowercase_ ( __snake_case : dict , __snake_case : str ) -> set[str]:
'''simple docstring'''
snake_case__ , snake_case__ :Tuple = set(__snake_case ), [start]
while stack:
snake_case__ :Dict = stack.pop()
explored.add(__snake_case )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(__snake_case )
return explored
__UpperCAmelCase : Optional[int] = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
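# Self-contained mini-sketch (hedged restatement of the traversal above, since
# the original def name is mangled): iterative DFS with an explicit stack.
def _dfs_sketch(graph: dict, start: str) -> set[str]:
    explored, stack = set(), [start]
    while stack:
        node = stack.pop()
        if node in explored:
            continue
        explored.add(node)
        # push neighbours in reverse so they are visited in listed order
        for adj in reversed(graph[node]):
            if adj not in explored:
                stack.append(adj)
    return explored

_demo_graph = {"A": ["B", "C"], "B": ["D"], "C": [], "D": []}
assert _dfs_sketch(_demo_graph, "A") == {"A", "B", "C", "D"}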
| 57
|
import os
import sys
import unittest
__UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        simple_backend = find_backend("    if not is_torch_available():" )
        self.assertEqual(simple_backend ,"torch" )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):" )
        self.assertEqual(double_backend ,"torch_and_transformers" )
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
        self.assertEqual(triple_backend ,"torch_and_transformers_and_onnx" )
def lowerCAmelCase_ ( self ) -> str:
        objects = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" ,UpperCamelCase )
self.assertIn("torch_and_transformers" ,UpperCamelCase )
self.assertIn("flax_and_transformers" ,UpperCamelCase )
self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" )
self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" )
snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" )
self.assertEqual(
UpperCamelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" )
self.assertEqual(UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
| 57
| 1
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
| 587
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = """camembert"""
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig ):
    """simple docstring"""
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 587
| 1
|
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module ):
    def __init__( self):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4 , 5)
    def forward( self , x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase ):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict())
            index_file = os.path.join(tmp_dir , 'index.json')
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , f"""{key}.dat""")
                self.assertTrue(os.path.isfile(weight_file))
# TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , 'weight' , tmp_dir , {})
                weight_file = os.path.join(tmp_dir , 'weight.dat')
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index , {'weight': {'shape': [2, 3], 'dtype': str(dtype).split('.')[1]}})
                new_weight = load_offloaded_weight(weight_file , index['weight'])
                self.assertTrue(torch.equal(weight , new_weight))
    def test_weights_loader(self):
SCREAMING_SNAKE_CASE_ = ModelForTest()
SCREAMING_SNAKE_CASE_ = model.state_dict()
SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'linear2' not in k}
SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A)
SCREAMING_SNAKE_CASE_ = OffloadedWeightsLoader(state_dict=_A , save_folder=_A)
# Every key is there with the right value
self.assertEqual(sorted(_A) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key]))
SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'weight' in k}
SCREAMING_SNAKE_CASE_ = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A)
SCREAMING_SNAKE_CASE_ = OffloadedWeightsLoader(state_dict=_A , save_folder=_A)
# Every key is there with the right value
self.assertEqual(sorted(_A) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key]))
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A)
# Duplicates are removed
SCREAMING_SNAKE_CASE_ = OffloadedWeightsLoader(state_dict=_A , save_folder=_A)
# Every key is there with the right value
self.assertEqual(sorted(_A) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key]))
    def test_extract_submodules_state_dict(self):
SCREAMING_SNAKE_CASE_ = {'a.1': 0, 'a.10': 1, 'a.2': 2}
SCREAMING_SNAKE_CASE_ = extract_submodules_state_dict(_A , ['a.1', 'a.2'])
self.assertDictEqual(_A , {'a.1': 0, 'a.2': 2})
SCREAMING_SNAKE_CASE_ = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
SCREAMING_SNAKE_CASE_ = extract_submodules_state_dict(_A , ['a.1', 'a.2'])
self.assertDictEqual(_A , {'a.1.a': 0, 'a.2.a': 2})
| 714
|
def triangle_number_generator():
    """Yield the triangular numbers n * (n + 1) / 2 for n = 1, 2, 3, ..."""
    for n in range(1 , 1_000_000 ):
        yield n * (n + 1) // 2
def count_divisors(n ):
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def solution():
    """Return the first triangle number having more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
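# Sanity check for count_divisors (a standard number-theory fact, not part of the original
# script): 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors -- 1, 2, 4, 7, 14, 28 --
# so count_divisors(28) should return 6.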
| 620
| 0
|
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features : Features):
    batch_size = np.inf
    def set_batch_size(feature : FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature , Image):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature , Audio):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature , Value) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
    _visit(features , set_batch_size)
    return None if batch_size is np.inf else batch_size
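# Rationale (inferred from the constants above): image, audio and raw-binary columns hold large
# cells, so the Parquet row-group size is capped per feature type; if no such feature is present,
# batch_size stays np.inf and None is returned so the writer falls back to its default batch size.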
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=self.path_or_paths , features=features , hash=hash , **kwargs , )
    def read(self ):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    def __init__( self , dataset , path_or_buf , batch_size = None , **parquet_writer_kwargs , ):
        '''simple docstring'''
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write(self )-> int:
        '''simple docstring'''
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , 'wb+' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
return written
    def _write(self , file_obj , batch_size , **parquet_writer_kwargs )-> int:
        '''simple docstring'''
        written = 0
        _ = parquet_writer_kwargs.pop('path_or_buf' , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
written += batch.nbytes
writer.close()
return written
| 3
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num : float ) -> float:
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand(x : float , z : float ) -> float:
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
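# Illustrative check (a mathematical fact, not part of the original module): for positive
# integers the integral equals (n - 1)!, so gamma(5) should evaluate to approximately 24.0.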
| 127
| 0
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin ):
    '''simple docstring'''
    attributes = ['''image_processor''']
    image_processor_class = '''SamImageProcessor'''
    def __init__( self , image_processor ) -> Any:
        """simple docstring"""
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size['longest_edge']
def __call__( self : Optional[Any] , A_ : Tuple=None , A_ : List[Any]=None , A_ : Dict=None , A_ : Any=None , A_ : Optional[Union[str, TensorType]] = None , **A_ : List[str] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.image_processor(
_A , return_tensors=_A , **_A , )
# pop arguments that are not used in the foward but used nevertheless
lowerCamelCase_ = encoding_image_processor['original_sizes']
if hasattr(_A , 'numpy' ): # Checks if Torch or TF tensor
lowerCamelCase_ = original_sizes.numpy()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._check_and_preprocess_points(
input_points=_A , input_labels=_A , input_boxes=_A , )
lowerCamelCase_ = self._normalize_and_convert(
_A , _A , input_points=_A , input_labels=_A , input_boxes=_A , return_tensors=_A , )
return encoding_image_processor
def a__ ( self : str , A_ : List[Any] , A_ : Optional[int] , A_ : Union[str, Any]=None , A_ : Optional[int]=None , A_ : Optional[Any]=None , A_ : Dict="pt" , ) -> Optional[Any]:
"""simple docstring"""
if input_points is not None:
if len(_A ) != len(_A ):
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , _A , original_sizes[0] ) for point in input_points
]
else:
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , _A , _A )
for point, original_size in zip(_A , _A )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowerCamelCase_ , lowerCamelCase_ = self._pad_points_and_labels(_A , _A )
lowerCamelCase_ = np.array(_A )
if input_labels is not None:
lowerCamelCase_ = np.array(_A )
if input_boxes is not None:
if len(_A ) != len(_A ):
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , _A , original_sizes[0] , is_bounding_box=_A )
for box in input_boxes
]
else:
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , _A , _A , is_bounding_box=_A )
for box, original_size in zip(_A , _A )
]
lowerCamelCase_ = np.array(_A )
if input_boxes is not None:
if return_tensors == "pt":
lowerCamelCase_ = torch.from_numpy(_A )
# boxes batch size of 1 by default
lowerCamelCase_ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowerCamelCase_ = tf.convert_to_tensor(_A )
# boxes batch size of 1 by default
lowerCamelCase_ = tf.expand_dims(_A , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowerCamelCase_ = torch.from_numpy(_A )
# point batch size of 1 by default
lowerCamelCase_ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowerCamelCase_ = tf.convert_to_tensor(_A )
# point batch size of 1 by default
lowerCamelCase_ = tf.expand_dims(_A , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowerCamelCase_ = torch.from_numpy(_A )
# point batch size of 1 by default
lowerCamelCase_ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowerCamelCase_ = tf.convert_to_tensor(_A )
# point batch size of 1 by default
lowerCamelCase_ = tf.expand_dims(_A , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def a__ ( self : Any , A_ : str , A_ : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = max([point.shape[0] for point in input_points] )
lowerCamelCase_ = []
for i, point in enumerate(_A ):
if point.shape[0] != expected_nb_points:
lowerCamelCase_ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
lowerCamelCase_ = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(_A )
lowerCamelCase_ = processed_input_points
return input_points, input_labels
def a__ ( self : List[Any] , A_ : int , A_ : np.ndarray , A_ : Any , A_ : Dict=False ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = original_size
lowerCamelCase_ , lowerCamelCase_ = self.image_processor._get_preprocess_shape(_A , longest_edge=_A )
lowerCamelCase_ = deepcopy(_A ).astype(_A )
if is_bounding_box:
lowerCamelCase_ = coords.reshape(-1 , 2 , 2 )
lowerCamelCase_ = coords[..., 0] * (new_w / old_w)
lowerCamelCase_ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowerCamelCase_ = coords.reshape(-1 , 4 )
return coords
def a__ ( self : Any , A_ : Optional[int]=None , A_ : List[str]=None , A_ : Optional[int]=None , ) -> Union[str, Any]:
"""simple docstring"""
if input_points is not None:
if hasattr(_A , 'numpy' ): # Checks for TF or Torch tensor
lowerCamelCase_ = input_points.numpy().tolist()
if not isinstance(_A , _A ) or not isinstance(input_points[0] , _A ):
raise ValueError('Input points must be a list of list of floating points.' )
lowerCamelCase_ = [np.array(_A ) for input_point in input_points]
else:
lowerCamelCase_ = None
if input_labels is not None:
if hasattr(_A , 'numpy' ):
lowerCamelCase_ = input_labels.numpy().tolist()
if not isinstance(_A , _A ) or not isinstance(input_labels[0] , _A ):
raise ValueError('Input labels must be a list of list integers.' )
lowerCamelCase_ = [np.array(_A ) for label in input_labels]
else:
lowerCamelCase_ = None
if input_boxes is not None:
if hasattr(_A , 'numpy' ):
lowerCamelCase_ = input_boxes.numpy().tolist()
if (
not isinstance(_A , _A )
or not isinstance(input_boxes[0] , _A )
or not isinstance(input_boxes[0][0] , _A )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
                lowerCamelCase_ = [np.array(box ).astype(np.float32 ) for box in input_boxes]
else:
lowerCamelCase_ = None
return input_points, input_labels, input_boxes
@property
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.image_processor.model_input_names
return list(dict.fromkeys(_A ) )
def a__ ( self : List[Any] , *A_ : Union[str, Any] , **A_ : Optional[Any] ) -> int:
"""simple docstring"""
return self.image_processor.post_process_masks(*_A , **_A )
| 705
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    '''simple docstring'''
def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = num_heads
lowerCamelCase_ = window_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = use_absolute_embeddings
lowerCamelCase_ = patch_norm
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = is_training
lowerCamelCase_ = scope
lowerCamelCase_ = use_labels
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = FocalNetModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
UpperCamelCase = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
        lowerCamelCase_ = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_ )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def a__ ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A_ ) , A_ )
# FocalNet has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(A_ ) , A_ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
@slow
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(A_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=A_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
@cached_property
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin , unittest.TestCase ):
    '''simple docstring'''
UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase = FocalNetConfig
UpperCamelCase = False
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
| 651
| 0
|
from PIL import Image
def change_brightness(img : Image , level : float ) -> Image:
    """Change the brightness of a PIL Image by the given level."""
    def brightness(c : int ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('image_data/lena_brightness.png', format='png')
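# Note (behavioral observation, worth verifying for non-8-bit modes): brightness(c) simplifies
# to c + level, so level=100 shifts every channel up by 100; Image.point() builds a lookup
# table over inputs 0-255 and clamps out-of-range outputs for 8-bit images.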
| 55
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _a ( unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling(self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
        AcceleratorState._reset_state()
| 86
| 0
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class EfficientFormerConfig(PretrainedConfig ):
    model_type = "efficientformer"
    def __init__( self , depths : List[int] = [3, 2, 6, 4] , hidden_sizes : List[int] = [48, 96, 224, 448] , downsamples : List[bool] = [True, True, True, True] , dim : int = 448 , key_dim : int = 32 , attention_ratio : int = 4 , resolution : int = 7 , num_hidden_layers : int = 5 , num_attention_heads : int = 8 , mlp_expansion_ratio : int = 4 , hidden_dropout_prob : float = 0.0 , patch_size : int = 16 , num_channels : int = 3 , pool_size : int = 3 , downsample_patch_size : int = 3 , downsample_stride : int = 2 , downsample_pad : int = 1 , drop_path_rate : float = 0.0 , num_meta3d_blocks : int = 1 , distillation : bool = True , use_layer_scale : bool = True , layer_scale_init_value : float = 1E-5 , hidden_act : str = "gelu" , initializer_range : float = 0.02 , layer_norm_eps : float = 1E-12 , image_size : int = 224 , batch_norm_eps : float = 1E-05 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 718
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
SCREAMING_SNAKE_CASE__ : Any = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
SCREAMING_SNAKE_CASE__ : int = BASE_URL + """/user"""
# https://github.com/settings/tokens
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.environ.get("""USER_TOKEN""", """""")
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ :Dict = {
'Authorization': F'token {auth_token}',
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(SCREAMING_SNAKE_CASE , headers=SCREAMING_SNAKE_CASE ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 233
| 0
|
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def test_custom_files_are_present(transformers_path ) -> bool:
    """Check that all the custom-extension files are shipped with the package."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 470
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class GPTSanJapaneseConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , vocab_size=36_000 , max_position_embeddings=1_280 , d_model=1_024 , d_ff=8_192 , d_ext=4_096 , d_spout=128 , num_switch_layers=10 , num_ext_layers=0 , num_heads=16 , num_experts=16 , expert_capacity=128 , dropout_rate=0.0 , layer_norm_epsilon=1E-5 , router_bias=False , router_jitter_noise=0.0 , router_dtype="float32" , router_ignore_padding_tokens=False , output_hidden_states=False , output_attentions=False , initializer_factor=0.002 , output_router_logits=False , use_cache=True , separator_token_id=35_998 , pad_token_id=35_995 , eos_token_id=35_999 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , **kwargs , )
| 580
| 0
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self ):
        act = get_activation('swish' )
        self.assertIsInstance(act , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa ) ).item() , 2_0 )
def lowerCAmelCase ( self ):
        act = get_activation('silu' )
        self.assertIsInstance(act , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa ) ).item() , 2_0 )
def lowerCAmelCase ( self ):
        act = get_activation('mish' )
        self.assertIsInstance(act , nn.Mish )
self.assertEqual(act(torch.tensor(-2_0_0 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa ) ).item() , 2_0 )
def lowerCAmelCase ( self ):
        act = get_activation('gelu' )
        self.assertIsInstance(act , nn.GELU )
self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(2_0 , dtype=torch.floataa ) ).item() , 2_0 )
| 287
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = LayoutLMTokenizer
SCREAMING_SNAKE_CASE__ = LayoutLMTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
def lowerCAmelCase ( self ):
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCAmelCase ( self , **_lowerCamelCase ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def lowerCAmelCase ( self , _lowerCamelCase ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
return input_text, output_text
def lowerCAmelCase ( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )
def lowerCAmelCase ( self ):
pass
| 287
| 1
|
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream ):
    def __init__( self , generator : Callable , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , gen_kwargs : Optional[dict] = None , num_proc : Optional[int] = None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.builder = Generator(
            cache_dir=cache_dir , features=features , generator=generator , gen_kwargs=gen_kwargs , **kwargs , )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
# Build iterable dataset
if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split='''train''' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
| 103
|
def triangle_number_generator():
    for n in range(1 , 1_000_000 ):
        yield n * (n + 1) // 2
def count_divisors(n ) -> int:
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
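# Why the loop works (standard result): for n = p1^a1 * p2^a2 * ..., the divisor count is
# (a1 + 1) * (a2 + 1) * ...; the final "if n > 1" doubling accounts for one remaining prime
# factor larger than sqrt(n), whose exponent must be 1.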
def solution() -> int:
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
| 318
| 0
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCamelCase ( unittest.TestCase ):
    def setUp(self ):
snake_case = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
snake_case = dict(zip(_A , range(len(_A ) ) ) )
snake_case = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
snake_case = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 1_6_0_0_0,
"return_attention_mask": False,
"do_normalize": True,
}
snake_case = tempfile.mkdtemp()
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case = os.path.join(self.tmpdirname , _A )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_A ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_A ) + "\n" )
# load decoder from hub
snake_case = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self , **kwargs_init ):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_feature_extractor(self , **kwargs ):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
    def get_decoder(self , **kwargs ):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
def UpperCAmelCase(self : List[str] ) -> Optional[int]:
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        processor.save_pretrained(self.tmpdirname )
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , WavaVecaCTCTokenizer )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , WavaVecaFeatureExtractor )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
        self.assertIsInstance(processor.decoder , BeamSearchDecoderCTC )
def UpperCAmelCase(self : List[Any] ) -> Optional[Any]:
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCAmelCase(self : Optional[Any] ) -> Dict:
        tokenizer = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(_A , "include" ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCAmelCase(self : Optional[Any] ) -> Optional[int]:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        raw_speech = floats_list((3, 1_0_0_0) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors="np" )
        input_processor = processor(raw_speech , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase(self : List[Any] ) -> Optional[int]:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        input_string = "This is a test string"
        encoded_processor = processor(text=input_string )
        encoded_tok = tokenizer(input_string )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self : Optional[Any] , shape : Any=(2, 1_0, 1_6) , seed : List[Any]=7_7 ) -> Optional[Any]:
        np.random.seed(seed )
        return np.random.rand(*shape )
def UpperCAmelCase(self : Any ) -> str:
snake_case = self.get_feature_extractor()
snake_case = self.get_tokenizer()
snake_case = self.get_decoder()
snake_case = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
snake_case = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
snake_case = processor.decode(_A )
snake_case = decoder.decode_beams(_A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def UpperCAmelCase(self : str , _A : str ) -> List[Any]:
snake_case = self.get_feature_extractor()
snake_case = self.get_tokenizer()
snake_case = self.get_decoder()
snake_case = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
snake_case = processor.batch_decode(_A )
else:
with get_context(_A ).Pool() as pool:
snake_case = processor.batch_decode(_A , _A )
snake_case = list(_A )
with get_context("fork" ).Pool() as p:
snake_case = decoder.decode_beams_batch(_A , _A )
snake_case , snake_case , snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_A , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(_A , decoded_processor.logit_score )
self.assertListEqual(_A , decoded_processor.lm_score )
def UpperCAmelCase(self : List[str] ) -> List[str]:
snake_case = self.get_feature_extractor()
snake_case = self.get_tokenizer()
snake_case = self.get_decoder()
snake_case = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
snake_case = self._get_dummy_logits()
snake_case = 1_5
snake_case = -20.0
snake_case = -4.0
snake_case = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
snake_case = decoded_processor_out.text
snake_case = list(_A )
with get_context("fork" ).Pool() as pool:
snake_case = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
snake_case = [d[0][0] for d in decoded_decoder_out]
snake_case = [d[0][2] for d in decoded_decoder_out]
snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , _A , atol=1E-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , _A , atol=1E-3 ) )
def UpperCAmelCase(self : Optional[int] ) -> Any:
snake_case = self.get_feature_extractor()
snake_case = self.get_tokenizer()
snake_case = self.get_decoder()
snake_case = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
snake_case = self._get_dummy_logits()
snake_case = 2.0
snake_case = 5.0
snake_case = -20.0
snake_case = True
snake_case = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
snake_case = decoded_processor_out.text
snake_case = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context("fork" ).Pool() as pool:
snake_case = decoder.decode_beams_batch(
_A , _A , )
snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , _A )
snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _A )
def UpperCAmelCase(self : Optional[Any] ) -> Any:
snake_case = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case = processor.decoder.model_container[processor.decoder._model_key]
snake_case = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
snake_case = os.listdir(_A )
snake_case = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
def UpperCAmelCase(self : List[Any] ) -> str:
snake_case = snapshot_download("hf-internal-testing/processor_with_lm" )
snake_case = WavaVecaProcessorWithLM.from_pretrained(_A )
snake_case = processor.decoder.model_container[processor.decoder._model_key]
snake_case = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
snake_case = os.listdir(_A )
snake_case = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in cache are the same
self.assertListEqual(_A , _A )
def UpperCAmelCase(self : List[str] ) -> Optional[int]:
snake_case = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case = floats_list((3, 1_0_0_0) )
snake_case = processor_wavaveca(_A , return_tensors="np" )
snake_case = processor_auto(_A , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
snake_case = self._get_dummy_logits()
snake_case = processor_wavaveca.batch_decode(_A )
snake_case = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCAmelCase(self : Tuple ) -> List[Any]:
snake_case = self.get_feature_extractor()
snake_case = self.get_tokenizer()
snake_case = self.get_decoder()
snake_case = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
    def get_from_offsets(offsets : List[Any] , key : Union[str, Any] ) -> Dict:
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def UpperCAmelCase(self : Optional[int] ) -> List[Any]:
snake_case = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case = self._get_dummy_logits()[0]
snake_case = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def UpperCAmelCase(self : Tuple ) -> Optional[int]:
snake_case = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case = self._get_dummy_logits()
snake_case = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(_A , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCAmelCase(self : Tuple ) -> Optional[Any]:
import torch
snake_case = load_dataset("common_voice" , "en" , split="train" , streaming=_A )
snake_case = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
snake_case = iter(_A )
snake_case = next(_A )
snake_case = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
snake_case = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
snake_case = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
snake_case = model(_A ).logits.cpu().numpy()
snake_case = processor.decode(logits[0] , output_word_offsets=_A )
snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
snake_case = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
snake_case = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(_A , "word" ) ) , _A )
self.assertEqual(" ".join(self.get_from_offsets(_A , "word" ) ) , output.text )
# output times
snake_case = torch.tensor(self.get_from_offsets(_A , "start_time" ) )
snake_case = torch.tensor(self.get_from_offsets(_A , "end_time" ) )
# fmt: off
snake_case = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
snake_case = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
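# A minimal usage sketch (not part of the test suite above) of the pool pattern these
# tests exercise, reusing the same test checkpoint and dummy-logit shape. As the
# comments above stress, the Pool must be created *after* the processor so the
# decoder's LM is available to the worker processes.
import numpy as np
from multiprocessing import get_context
from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
logits = np.random.rand(2, 10, 16)  # (batch, time, vocab) dummy CTC logits
with get_context("fork").Pool() as pool:
    texts = processor.batch_decode(logits, pool=pool).text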
| 294
|
def solution ( n = 1000 ) -> int:
    """simple docstring"""
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"{solution() = }")
| 294
| 1
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _lowercase ( kwargs , expected ) -> Tuple:
    out = _distribute_shards(**kwargs )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def _lowercase ( gen_kwargs , max_num_jobs , expected ) -> int:
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def _lowercase ( gen_kwargs , expected ) -> Any:
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
assert out == expected
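# A standalone sketch (not the datasets internals) of the contract the cases
# above pin down: num_shards is split into at most max_num_jobs contiguous
# ranges, with earlier jobs absorbing the remainder.
def distribute_shards_sketch(num_shards, max_num_jobs):
    num_jobs = min(num_shards, max_num_jobs)
    base, extra = divmod(num_shards, num_jobs) if num_jobs else (0, 0)
    out, start = [], 0
    for job in range(num_jobs):
        size = base + (1 if job < extra else 0)
        out.append(range(start, start + size))
        start += size
    return out

assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]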
| 410
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
UpperCamelCase__ : Dict = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
UpperCamelCase__ : List[str] = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
UpperCamelCase__ : Optional[int] = F"""{src_lang}-{tgt_lang}"""
UpperCamelCase__ : Tuple = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
model_card_dir.mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , 'README.md' )
print(F"""Generating {path}""" )
with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
# make sure we are under the root of the project
UpperCAmelCase__ : Any = Path(__file__).resolve().parent.parent.parent
UpperCAmelCase__ : Optional[Any] = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCAmelCase__ : List[str] = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 410
| 1
|
def solution ( limit = 1000000 ) -> int:
    """simple docstring"""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z > 0 requires a > d, and n > 0 requires a < 4*d
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(F'''{solution() = }''')
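# The algebra behind the inner loop above: writing the arithmetic progression
# as x = y + d, y, z = y - d gives x**2 - y**2 - z**2 = y * (4*d - y) = n, so
# for every first term y dividing n we get 4*d = y + n / y; d must then be an
# integer with y > d (so z > 0) and y < 4*d (so n > 0).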
| 720
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class MvpConfig ( PretrainedConfig ):
'''simple docstring'''
__a : Dict = """mvp"""
__a : List[str] = ["""past_key_values"""]
__a : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self, vocab_size=5_02_67, max_position_embeddings=10_24, encoder_layers=12, encoder_ffn_dim=40_96, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=40_96, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=10_24, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=1_00, prompt_mid_dim=8_00, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""", False ):
            self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
| 436
| 0
|
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint ( ABC ):
def __init__( self : Union[str, Any] ):
# test for the above condition
self.test()
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = 0
_UpperCamelCase = False
while not completed:
if counter == 1:
self.reset()
_UpperCamelCase = self.advance()
if not self.does_advance(_A ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.update(_A )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def UpperCamelCase_ ( self : Union[str, Any] ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCamelCase_ ( self : Any , _A : int ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCamelCase_ ( self : Optional[Any] , _A : int ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCamelCase_ ( self : Optional[Any] ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCamelCase_ ( self : str ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCamelCase_ ( self : Optional[int] , _A : str=False ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class PhrasalConstraint ( Constraint ):
def __init__( self : Tuple , _A : List[int] ):
        super(Constraint , self ).__init__()
if not isinstance(_A , _A ) or len(_A ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_A , _A ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
_UpperCamelCase = token_ids
_UpperCamelCase = len(self.token_ids )
_UpperCamelCase = -1 # the index of the currently fulfilled step
_UpperCamelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self : int , _A : int ):
if not isinstance(_A , _A ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_A )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self : Optional[Any] , _A : int ):
if not isinstance(_A , _A ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_A )}""" )
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
if self.does_advance(_A ):
self.fulfilled_idx += 1
_UpperCamelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
_UpperCamelCase = True
_UpperCamelCase = completed
else:
# failed to make progress.
_UpperCamelCase = True
self.reset()
return stepped, completed, reset
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = False
_UpperCamelCase = 0
def UpperCamelCase_ ( self : str ):
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCamelCase_ ( self : Any , _A : Optional[int]=False ):
_UpperCamelCase = PhrasalConstraint(self.token_ids )
if stateful:
_UpperCamelCase = self.seqlen
_UpperCamelCase = self.fulfilled_idx
_UpperCamelCase = self.completed
return new_constraint
class DisjunctiveTrie :
    def __init__( self : Any , nested_token_ids : List[List[int]] , no_subsets : Optional[int]=True ):
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
                F""" {nested_token_ids}.""" )
        self.trie = root
    def next_tokens ( self : str , current_seq : Union[str, Any] ):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens
    def reached_leaf ( self : Any , current_seq : Optional[Any] ):
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0
    def count_leaves ( self : int , root : List[str] ):
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
    def has_subsets ( self : Optional[Any] , trie : Optional[Any] , nested_token_ids : Any ):
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
class DisjunctiveConstraint ( Constraint ):
def __init__( self : List[Any] , _A : List[List[int]] ):
        super(Constraint , self ).__init__()
if not isinstance(_A , _A ) or len(_A ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_A , _A ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_A , _A ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
_UpperCamelCase = DisjunctiveTrie(_A )
_UpperCamelCase = nested_token_ids
_UpperCamelCase = self.trie.max_height
_UpperCamelCase = []
_UpperCamelCase = False
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.trie.next_tokens(self.current_seq )
if len(_A ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self : Optional[int] , _A : int ):
if not isinstance(_A , _A ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_A )}""" )
_UpperCamelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCamelCase_ ( self : Union[str, Any] , _A : int ):
if not isinstance(_A , _A ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_A )}""" )
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
if self.does_advance(_A ):
self.current_seq.append(_A )
_UpperCamelCase = True
else:
_UpperCamelCase = True
self.reset()
_UpperCamelCase = self.trie.reached_leaf(self.current_seq )
_UpperCamelCase = completed
return stepped, completed, reset
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = False
_UpperCamelCase = []
def UpperCamelCase_ ( self : int ):
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCamelCase_ ( self : List[str] , _A : int=False ):
_UpperCamelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
_UpperCamelCase = self.seqlen
_UpperCamelCase = self.current_seq
_UpperCamelCase = self.completed
return new_constraint
class ConstraintListState :
def __init__( self : Tuple , _A : List[Constraint] ):
_UpperCamelCase = constraints
# max # of steps required to fulfill a given constraint
_UpperCamelCase = max([c.seqlen for c in constraints] )
_UpperCamelCase = len(_A )
_UpperCamelCase = False
self.init_state()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = []
_UpperCamelCase = None
_UpperCamelCase = [constraint.copy(stateful=_A ) for constraint in self.constraints]
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_UpperCamelCase = constraint.advance()
if isinstance(_A , _A ):
token_list.append(_A )
elif isinstance(_A , _A ):
token_list.extend(_A )
else:
_UpperCamelCase = self.inprogress_constraint.advance()
if isinstance(_A , _A ):
token_list.append(_A )
elif isinstance(_A , _A ):
token_list.extend(_A )
if len(_A ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self : Optional[int] , _A : Optional[List[int]] ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_UpperCamelCase , _UpperCamelCase = self.add(_A )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCamelCase_ ( self : Union[str, Any] , _A : int ):
if not isinstance(_A , _A ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
_UpperCamelCase , _UpperCamelCase = False, False
if self.completed:
_UpperCamelCase = True
_UpperCamelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.inprogress_constraint.update(_A )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_A ) )
_UpperCamelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_UpperCamelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
_UpperCamelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_A ):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = pending_constraint.update(_A )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_A )
_UpperCamelCase = None
if not complete and stepped:
_UpperCamelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_UpperCamelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_UpperCamelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
    def copy ( self : str , stateful : Optional[Any]=True ):
        new_state = ConstraintListState(self.constraints )  # we actually never touch self.constraints objects
        # throughout this process, so it stays at its initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
return new_state
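# A hand-driven sketch of the PhrasalConstraint state machine, written against
# the upstream transformers method names (update/remaining) that the obfuscated
# defs above stand in for:
constraint = PhrasalConstraint([5, 2, 7])
stepped, completed, reset = constraint.update(5)   # matches token_ids[0]
assert stepped and not completed and constraint.remaining() == 2
stepped, completed, reset = constraint.update(9)   # mismatch -> progress resets
assert reset and not stepped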
| 10
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_a : Union[str, Any] = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
_a : Union[str, Any] = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_a : List[str] = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=4 , _lowerCAmelCase=False ):
'''simple docstring'''
lowerCAmelCase__ :Any = compute_bleu(
reference_corpus=_lowerCAmelCase , translation_corpus=_lowerCAmelCase , max_order=_lowerCAmelCase , smooth=_lowerCAmelCase )
((lowerCAmelCase__) ,(lowerCAmelCase__) ,(lowerCAmelCase__) ,(lowerCAmelCase__) ,(lowerCAmelCase__) ,(lowerCAmelCase__)) :Union[str, Any] = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
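# For reference, the standard definition behind the values returned above:
#   BLEU = BP * exp( sum_{n=1..N} (1/N) * log p_n ),  BP = min(1, exp(1 - r/c)),
# where p_n are the modified n-gram precisions ("precisions"), N = max_order
# (default 4), c = "translation_length" and r = "reference_length".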
| 145
| 0
|
"""simple docstring"""
class Things :
    '''simple docstring'''
    def __init__( self : List[str] , name : str , value : Any , weight : Dict ):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self : Dict ):
        return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
    def get_value ( self : List[Any] ):
        return self.value
    def get_name ( self : Any ):
        return self.name
    def get_weight ( self : Optional[int] ):
        return self.weight
    def value_weight ( self : int ):
        return self.value / self.weight
def build_menu ( name : Any , value : int , weight : Optional[int] ) -> Optional[Any]:
    '''simple docstring'''
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy ( item : List[Any] , max_cost : List[str] , key_func : Tuple ) -> Any:
    '''simple docstring'''
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def _snake_case ( ) -> Optional[int]:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
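# A usage sketch for Things / build_menu / greedy above, keying on raw value:
menu = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 10], [40, 60, 10])
taken, total_value = greedy(menu, 60, Things.get_value)
assert total_value == 100.0  # Pizza (value 100, weight 60) exactly fills the budget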
| 505
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
a = logging.get_logger(__name__)
class Conversation :
'''simple docstring'''
    def __init__( self : str , text : str = None , conversation_id : uuid.UUID = None , past_user_inputs : Tuple=None , generated_responses : Tuple=None ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
    def __eq__( self : Union[str, Any] , other : Optional[int] ):
        if not isinstance(other , Conversation ):
            return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input ( self : int , text : str , overwrite : bool = False ):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
                    F'''with: "{text}".''' )
                self.new_user_input = text
            else:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
                    F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
        else:
            self.new_user_input = text
    def mark_processed ( self : List[str] ):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
    def append_response ( self : Dict , response : str ):
        self.generated_responses.append(response )
    def iter_texts ( self : Dict ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ):
        output = F'''Conversation id: {self.uuid} \n'''
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
            output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class ConversationalPipeline ( Pipeline ):
'''simple docstring'''
    def __init__( self : Optional[int] , *args : Optional[int] , **kwargs : Any ):
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters ( self : Optional[int] , min_length_for_response : Dict=None , minimum_tokens : Tuple=None , clean_up_tokenization_spaces : Dict=None , **generate_kwargs : List[Any] ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params['min_length_for_response'] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['minimum_tokens'] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params['max_length'] = generate_kwargs['max_length']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self : Any , conversations : Union[Conversation, List[Conversation]] , num_workers : int=0 , **kwargs : str ):
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
return outputs[0]
return outputs
    def preprocess ( self : Tuple , conversation : Conversation , min_length_for_response : int=32 ):
        if not isinstance(conversation , Conversation ):
            raise ValueError('ConversationalPipeline expects a Conversation as input' )
        if conversation.new_user_input is None:
            raise ValueError(
                F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
                'Add user inputs with the conversation\'s `add_user_input` method' )
        if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward ( self : Any , model_inputs : Optional[int] , minimum_tokens : Dict=10 , **generate_kwargs : Any ):
        max_length = generate_kwargs.get('max_length' , self.model.config.max_length )
        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop('conversation' )
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess ( self : Optional[Any] , model_outputs : Dict , clean_up_tokenization_spaces : Dict=True ):
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize ( self : str , conversation : Conversation ):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
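# A hedged end-to-end sketch of the pipeline above; the checkpoint is just an
# illustrative conversational model, not something this file mandates.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
dialog = Conversation("Going to the movies tonight - any suggestions?")
dialog = chatbot(dialog)
print(dialog.generated_responses[-1])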
| 505
| 1
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def _lowerCamelCase ( UpperCAmelCase_ : Tuple="ro", UpperCAmelCase_ : Any="en", UpperCAmelCase_ : List[str]="wmt16", UpperCAmelCase_ : Dict=None ) -> None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
A__ = F"""{src_lang}-{tgt_lang}"""
print(F"""Converting {dataset}-{pair}""" )
A__ = datasets.load_dataset(UpperCAmelCase_, UpperCAmelCase_ )
if save_dir is None:
A__ = F"""{dataset}-{pair}"""
A__ = Path(UpperCAmelCase_ )
save_dir.mkdir(exist_ok=UpperCAmelCase_ )
for split in ds.keys():
print(F"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
A__ = "val" if split == "validation" else split
A__ = save_dir.joinpath(F"""{fn}.source""" )
A__ = save_dir.joinpath(F"""{fn}.target""" )
A__ = src_path.open("w+" )
A__ = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
A__ = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(F"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
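# Usage note: fire exposes the keyword arguments above as CLI flags, so a
# typical invocation looks like (script name is whatever this file is saved as):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# which writes train/val/test .source and .target files into save_dir.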
| 104
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE__ (__snake_case , __snake_case , __snake_case ):
__lowerCamelCase : Optional[int] = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
    def __init__( self , prefix_length , prefix_inner_dim , prefix_hidden_dim = None , vocab_size = 5_0257 , n_positions = 1024 , n_embd = 768 , n_layer = 12 , n_head = 12 , n_inner = None , activation_function = "gelu_new" , resid_pdrop = 0.1 , embd_pdrop = 0.1 , attn_pdrop = 0.1 , layer_norm_epsilon = 1e-5 , initializer_range = 0.02 , scale_attn_weights = True , use_cache = True , scale_attn_by_inverse_layer_idx = False , reorder_and_upcast_attn = False , ):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"""
                f""" `n_embd`: {n_embd} are not equal.""")
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPTaConfig(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPTaLMHeadModel(gpt_config )
    def forward ( self , input_ids , prefix_embeds , attention_mask = None , labels = None , ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device)
            labels = torch.cat((dummy_token, input_ids) , dim=1)
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask)
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
    def get_dummy_token ( self , batch_size , device):
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device)
    def encode ( self , prefix):
        return self.encode_prefix(prefix)
@torch.no_grad()
    def generate_captions ( self , features , eos_token_id , device):
        features = torch.split(features , 1 , dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens , seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam ( self , input_ids=None , input_embeds=None , device=None , beam_size = 5 , entry_length = 67 , temperature = 1.0 , eos_token_id = None , ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int)
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool)
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)
        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores , next_tokens = logits.topk(beam_size , -1)
                generated = generated.expand(beam_size , *generated.shape[1:])
                next_tokens , scores = next_tokens.permute(1 , 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens) , dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average , next_tokens = scores_sum_average.view(-1).topk(beam_size , -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0] , 1 , -1)
            generated = torch.cat((generated, next_token_embed) , dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 164
| 0
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase ( nums : list[int]):
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding),
        )
    return max(max_including , max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
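# Worked trace of the include/exclude recurrence above for nums = [3, 2, 7, 10]:
#   start : including = 3,          excluding = 0
#   num=2 : including = 0 + 2 = 2,  excluding = max(3, 0)  = 3
#   num=7 : including = 3 + 7 = 10, excluding = max(2, 3)  = 3
#   num=10: including = 3 + 10 = 13, excluding = max(10, 3) = 10
# answer max(13, 10) = 13, i.e. picking the non-adjacent 3 and 10.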
| 714
|
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital ( num : int):
    digits = str(num)
    return len(digits) == 9 and set(digits) == set('123456789')
def solution ( ):
    for base_num in range(99_99 , 49_99 , -1):
        candidate = 10_00_02 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_33 , 99 , -1):
        candidate = 1_00_20_03 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 449
| 0
|
def __lowerCAmelCase ( txt ) -> list:
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 678
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
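# Note on the pattern above: at runtime the module object is swapped for a
# _LazyModule built from _import_structure, so `from ... import NezhaConfig`
# defers the heavy torch-backed imports until the attribute is first resolved;
# the TYPE_CHECKING branch only exists so static type checkers see real symbols.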
| 678
| 1
|
def __lowerCAmelCase ( A , A = " " ):
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
for index, char in enumerate(A ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCAmelCase_ = index + 1
elif index + 1 == len(A ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
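# Worked example for the splitter above:
#   __lowerCAmelCase("apple#banana#cherry", "#") -> ['apple', 'banana', 'cherry']
# (the elif branch flushes the final word, since no trailing separator exists).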
| 715
|
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a: Dict = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    """Byte-level tokenizer: every UTF-8 byte is a token, plus a few special tokens."""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Map a string to a list of single-character tokens, one per UTF-8 byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file to save
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
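# Usage sketch (illustrative; no pretrained files are needed since the vocab is
# just bytes): each UTF-8 byte maps to id byte + 3 (pad/eos/unk occupy 0..2), e.g.
#   tok = ByT5Tokenizer()
#   tok("hi").input_ids  # -> [107, 108, 1]  (ord("h")+3, ord("i")+3, then </s>)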
| 268
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 109
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
a = "cuda" if torch.cuda.is_available() else "cpu"
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=" " ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = text.split(__UpperCAmelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )]
def __magic_name__ ( __UpperCAmelCase ) -> dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(__UpperCAmelCase ):
titles.append(title if title is not None else """""" )
texts.append(__UpperCAmelCase )
return {"title": titles, "text": texts}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=__UpperCAmelCase , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
__SCREAMING_SNAKE_CASE = ctx_encoder(input_ids.to(device=__UpperCAmelCase ) , return_dict=__UpperCAmelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any:
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
__SCREAMING_SNAKE_CASE = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
__SCREAMING_SNAKE_CASE = dataset.map(__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
__SCREAMING_SNAKE_CASE = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
__SCREAMING_SNAKE_CASE = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
__SCREAMING_SNAKE_CASE = dataset.map(
partial(__UpperCAmelCase , ctx_encoder=__UpperCAmelCase , ctx_tokenizer=__UpperCAmelCase ) , batched=__UpperCAmelCase , batch_size=processing_args.batch_size , features=__UpperCAmelCase , )
# And finally save your dataset
__SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(__UpperCAmelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
__SCREAMING_SNAKE_CASE = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=__UpperCAmelCase )
# And save the index
__SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(__UpperCAmelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __a :
__UpperCamelCase : str = field(
default=str(Path(_snake_case ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ), metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''}, )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'}, )
__UpperCamelCase : str = field(
default='facebook/rag-sequence-nq', metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''}, )
__UpperCamelCase : str = field(
default='facebook/dpr-ctx_encoder-multiset-base', metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
}, )
__UpperCamelCase : Optional[str] = field(
default=str(Path(_snake_case ).parent / 'test_run' / 'dummy-kb' ), metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'}, )
@dataclass
class __a :
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
}, )
__UpperCamelCase : int = field(
default=16, metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
}, )
@dataclass
class __a :
__UpperCamelCase : int = field(
default=768, metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'}, )
__UpperCamelCase : int = field(
default=128, metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
}, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
a = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
a , a , a = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
a = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
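# Invocation sketch (the script name and paths are assumptions, not from the source):
#   python use_own_knowledge_dataset.py --csv_path my_kb.csv --output_dir ./kb
# The saved passages directory plus the .faiss file can then back a custom
# RagRetriever (index_name="custom") at inference time.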
| 109
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run check_program in a sandboxed subprocess and report whether it passed."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """
    Disables destructive functions so generated code cannot interfere with the
    test (e.g. fork bombs, killing other processes, removing files). This is
    not a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
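# Usage sketch (names as reconstructed above): each candidate program runs in a
# fresh process with stdio swallowed and destructive syscalls nulled out, e.g.
#   out = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="t0", completion_id=0)
#   out["passed"]  # -> True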
| 443
| 0
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every ordered combination of words from ``word_bank`` that concatenates to ``target``."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
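# Worked example (derived by hand from the table; ordering may differ): the
# first call yields the two decompositions of "jwajalapa":
#   [['jwa', 'j', 'a', 'lapa'], ['j', 'w', 'a', 'j', 'a', 'lapa']]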
| 64
|
def sylvester(number: int) -> int:
    """Return the ``number``-th term of Sylvester's sequence (1-indexed)."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
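# The recurrence above generates Sylvester's sequence 2, 3, 7, 43, 1807, 3263443, ...
# Each term is one more than the product of all previous terms; equivalently
# s(n) = s(n-1)**2 - s(n-1) + 1, which is what `lower * upper + 1` computes.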
| 217
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
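# Running sketch (the flag is the usual transformers convention; the file path
# is an assumption): slow tests such as the integration check above only run with
#   RUN_SLOW=1 pytest tests/models/deit/test_modeling_tf_deit.py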
| 713
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_1 = tokenizer.decode(x_token_1)
        output_2 = tokenizer.decode(x_token_2)
        output_3 = tokenizer.decode(x_token_3)
        self.assertEqual(output_1, expected_text)
        self.assertEqual(output_2, expected_text)
        self.assertEqual(output_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_input_ids = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_token_type_ids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attention_mask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_input_ids)
        self.assertListEqual(x_token.token_type_ids, expected_token_type_ids)
        self.assertListEqual(x_token.attention_mask, expected_attention_mask)
        self.assertListEqual(x_token_2.input_ids, expected_input_ids)
        self.assertListEqual(x_token_2.token_type_ids, expected_token_type_ids)
        self.assertListEqual(x_token_2.attention_mask, expected_attention_mask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 291
| 0
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = Features({feature: Value(dtype) for feature, dtype in features.items()})
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
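# Round-trip sketch of the API under test (buffer-based, mirroring the writer tests):
#   import io
#   from datasets import Dataset
#   from datasets.io.json import JsonDatasetWriter
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#   buf = io.BytesIO()
#   JsonDatasetWriter(ds, buf, lines=True).write()  # one JSON object per line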
| 64
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the image processor configuration dict from a local folder or the Hub."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class UpperCAmelCase :
def __init__( self : Any ):
"""simple docstring"""
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
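# Usage sketch (kept as a comment because it needs network access or a local
# checkpoint; the checkpoint name and custom classes below are illustrative
# assumptions, not values used by this module):
#
#     from transformers import AutoImageProcessor
#
#     # Resolves the concrete processor class (here ViTImageProcessor) from the
#     # `image_processor_type` / `model_type` found in the checkpoint's config.
#     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#
#     # Custom processors can be made discoverable through the same mapping:
#     # AutoImageProcessor.register(MyConfig, MyImageProcessor)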
| 103
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
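# A minimal sketch of how the template is used: `column_mapping` tells
# `Dataset.prepare_for_task` how to rename dataset columns to the canonical
# "text"/"summary" schema. The column names below are illustrative assumptions:
#
#     template = Summarization(text_column="article", summary_column="highlights")
#     assert template.column_mapping == {"article": "text", "highlights": "summary"}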
| 704
|
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
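# Quick sanity check of the trial division above: every prime p > 3 satisfies
# p % 6 in {1, 5}, so only candidates of the form 6k +/- 1 need testing.
#
#     assert is_prime(97) and not is_prime(91)  # 91 = 7 * 13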
class A ( unittest.TestCase ):
    def test_primes(self) -> None:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 377
| 0
|
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout: float = 0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
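# Minimal shape sketch for the transformer block above (the dimensions are
# illustrative assumptions, not values used anywhere in diffusers):
#
#     block = BasicTransformerBlock(dim=64, num_attention_heads=8, attention_head_dim=8)
#     sample = torch.randn(2, 16, 64)   # (batch, sequence, dim)
#     out = block(sample)               # residual self-attn + feed-forward
#     assert out.shape == sample.shape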
| 36
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
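# Example (a sketch; the override values are arbitrary assumptions):
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     # depth_multiplier scales the channel count of every conv layer, which is
#     # how variants like the 0.75_192 checkpoint above are derived.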
| 36
| 1
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 721
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
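# Example invocation (the paths are placeholders, not real checkpoints):
#
#     python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_tf_ckpt --output ./gptsan_pytorch.pt
#
# The script walks every tensor in the TF checkpoint, renames it to the
# matching PyTorch key, and transposes kernels because Mesh-TensorFlow stores
# linear weights as (in, out) while torch.nn.Linear expects (out, in).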
| 612
| 0
|
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 527
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
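# Example invocations (all paths below are placeholder assumptions):
#
#     # convert a pretrained (not fine-tuned) fairseq checkpoint
#     python convert_wav2vec2.py --checkpoint_path ./wav2vec_small.pt \
#         --pytorch_dump_folder_path ./wav2vec2-base --not_finetuned
#
#     # convert a CTC fine-tuned checkpoint together with its fairseq dict
#     python convert_wav2vec2.py --checkpoint_path ./wav2vec_small_960h.pt \
#         --pytorch_dump_folder_path ./wav2vec2-base-960h --dict_path ./dict.ltr.txt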
| 527
| 1
|
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F"{solution() = }")
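# The generator is lazy, so the first few primes can be pulled cheaply:
#
#     >>> list(itertools.islice(prime_generator(), 5))
#     [2, 3, 5, 7, 11]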
| 717
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50_257,
        num_wordpiece_labels=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
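# Example (a sketch; the overrides are arbitrary assumptions). The defaults
# above mirror the alibaba-damo/mgp-str-base architecture, so a smaller
# variant can be described by overriding just a few fields:
#
#     config = MgpstrConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)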
| 518
| 0
|
'''simple docstring'''
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
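# Sanity check: C(10, 5) = 10! / (5! * 5!) = 252. The loop above fills one row
# of Pascal's triangle in place, using C(n, k) = C(n-1, k-1) + C(n-1, k).
assert binomial_coefficient(n=10, r=5) == 252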
| 620
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
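# Example (a sketch; the override is an arbitrary assumption). A config with a
# custom backbone can be built from a plain dict, which `__init__` converts
# into the matching config class via CONFIG_MAPPING:
#
#     config = DetaConfig(backbone_config={"model_type": "resnet", "out_features": ["stage2", "stage3", "stage4"]})
#     assert config.hidden_size == config.d_model == 256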
| 620
| 1
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module_params(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
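# Example usage (runs anywhere; the device string depends on the host hardware):
#
#     device = get_device()                 # "cuda", "mps", or "cpu"
#     print(f"[{get_timestamp()}] using {device}")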
| 704
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
| 620
| 0
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score an item by counting characters that match the target at the same position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and recombine the halves.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    # Mutate a random gene of a child with another one from the list.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 346
|
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _snake_case ( self ) -> str:
# enable deterministic behavior for gradient checkpointing
_lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common()
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
assert not model.is_gradient_checkpointing and model.training
_lowerCAmelCase = model(**_lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_lowerCAmelCase = torch.randn_like(_lowerCAmelCase )
_lowerCAmelCase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_lowerCAmelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_lowerCAmelCase = model_a(**_lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_lowerCAmelCase = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
_lowerCAmelCase = dict(model.named_parameters() )
_lowerCAmelCase = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
            expected_output_slice = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
            expected_output_slice = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
def _snake_case ( self , _lowerCAmelCase="CompVis/stable-diffusion-v1-4" , _lowerCAmelCase=False ) -> Tuple:
_lowerCAmelCase = "fp16" if fpaa else None
_lowerCAmelCase = torch.floataa if fpaa else torch.floataa
_lowerCAmelCase = AutoencoderKL.from_pretrained(
_lowerCAmelCase , subfolder="vae" , torch_dtype=_lowerCAmelCase , revision=_lowerCAmelCase , )
model.to(_lowerCAmelCase ).eval()
return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
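# Illustrative sketch of the encode/decode round-trip the tests above exercise.
# The model id matches get_sd_vae_model; the shapes assume the default 512x512 inputs:
#
#   vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
#   image = torch.randn(1, 3, 512, 512)
#   latents = vae.encode(image).latent_dist.sample()  # -> (1, 4, 64, 64)
#   reconstruction = vae.decode(latents).sample       # -> (1, 3, 512, 512)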
| 18
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0,
                 do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: Optional[bool] = True,
                       normalize_vars: Optional[bool] = True, padding_value: float = 0.0) -> np.ndarray:
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
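# Minimal usage sketch (illustrative; the random 16 kHz waveform below is an
# assumption for the example, not part of this module):
#
#   import numpy as np
#   extractor = Speech2TextFeatureExtractor()
#   waveform = np.random.randn(16000).astype(np.float32)  # ~1 s of fake audio
#   features = extractor(waveform, sampling_rate=16000, padding=True, return_tensors="np")
#   print(features["input_features"].shape)  # (1, num_frames, num_mel_bins)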
| 708
|
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
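# Illustrative direct use of the interpreter under test (mirrors the cases above;
# `evaluate` returns the value of the last statement and mutates `state` in place):
#
#   state = {"x": 3}
#   result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
#   # result == 5, state == {"x": 3, "y": 5}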
| 141
| 0
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize('input_data' , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input_ = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input_) == expected_output

    input_ = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input_) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 469
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-relevant hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
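# Illustrative calls (the version pins below are made-up examples):
#
#   require_version("numpy")              # only checks that numpy is installed
#   require_version("tokenizers>=0.9.4")  # also checks the installed version
#   require_version("python>=3.8,<3.12")  # special-cased against sys.version_info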
| 469
| 1
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 714
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Map the patch-embedding weights of stage `idx` to their HF names."""
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    """Map the attention-block weights of stage `idx`, block `cnt` to their HF names."""
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    """Map the cls-token weight of stage `idx` to its HF name."""
    token = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", '''stage2.cls_token''') )
return token
def final():
    """Map the final layernorm and classification-head weights to their HF names."""
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Fetch the original CvT weights and convert them to the HuggingFace format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_8_4,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCamelCase : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 341
| 0
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 133
|
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
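# Illustrative sketch of how the decorator above is meant to be used (the names
# `model` and `input_ids` are assumptions for the example):
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)
#
#   forward()  # runs as an XLA-compiled tf.function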
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run an additional 5 times to stabilize compilation for TPU/XLA
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat,
                # min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
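# Example usage sketch. The task alias and checkpoint below are assumptions made
# for illustration; any CLAP-style audio-text model exposing `logits_per_audio`
# should behave the same way through this pipeline.
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   preds = classifier(
#       "dog_bark.wav", candidate_labels=["dog barking", "rain", "human speech"]
#   )
#   # -> [{"score": ..., "label": "dog barking"}, ...] (sorted by descending score)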
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # retarget each frontier toward the other frontier's newest node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
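# Why the bidirectional variant is usually faster (a rough sketch, not a proof):
# with branching factor b and solution depth d, a single-frontier BFS visits on
# the order of b**d nodes, while two frontiers that meet in the middle visit
# roughly 2 * b**(d / 2) nodes -- exponentially fewer for larger d.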
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
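# Quick sanity check (values verified by hand: 25 = 0b11001, 32 = 0b100000):
#
#   >>> binary_or(25, 32)
#   '0b111001'
#   >>> int(binary_or(25, 32), 2) == (25 | 32)
#   True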
"""simple docstring"""
def solution() -> int:
    """Product of the digits d_1 * d_10 * d_100 * ... * d_1_000_000 of
    Champernowne's constant 0.123456789101112... (Project Euler problem 40)."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[9_99])
        * int(constant[99_99])
        * int(constant[9_99_99])
        * int(constant[99_99_99])
    )
if __name__ == "__main__":
print(solution())
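# Note on the loop above: it appends whole numbers until at least 1e6 of them
# have been added, producing nearly six million digits, more than enough to
# index position 999_999. For the record, the product of the sampled digits is
# 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210, the well-known Project Euler 40 answer.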
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
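# Example of what the lazy structure buys (a sketch; the exact import path for
# this deprecated package may differ between transformers versions):
#
#   from transformers.models.deprecated.mmbt import MMBTConfig  # cheap: no torch import yet
#   from transformers.models.deprecated.mmbt import MMBTModel   # first access pulls in modeling_mmbt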
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Use the input text to segment the region that should be inpainted
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet,
            scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width,
            num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
        )
from ..utils import DummyObject, requires_backends
# Stub classes: each raises a helpful error at instantiation when the "speech"
# extra (sentencepiece/torchaudio backends) is not installed. The class names
# follow the usual contents of transformers' dummy speech objects module.
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and evaluation."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """Generates the boolean masked-position map used by the masked image modeling objective."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
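# Shape sanity check for the defaults above (a quick sketch): input_size=192 and
# mask_patch_size=32 give a 6x6 grid of maskable patches (token_count=36);
# mask_ratio=0.6 masks ceil(36 * 0.6) = 22 of them. Each masked patch is then
# expanded by scale=32//4=8, so the flattened mask covers a 48x48 boolean map:
#
#   mask = MaskGenerator()()
#   assert mask.shape == (48 * 48,) and mask.sum() == 22 * 8 * 8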
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing
    # the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"Training/evaluation parameters {training_args}" )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Apply the image transforms and attach a freshly sampled boolean mask per example."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
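# This script is meant to run unattended on a schedule (e.g. a daily GitHub
# Actions cron job) with a token that has `repo` scope, roughly:
#
#   GITHUB_TOKEN=<token> python close_stale_issues.py
#
# (The file name above is illustrative; the repo's actual utility script name may differ.)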
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase (unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
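# Why this matters (a brief note): `accelerator.prepare` wraps the optimizer in
# an AcceleratedOptimizer, and anything that checkpoints training state -- e.g.
# `torch.save` or pickling for fault tolerance -- has to round-trip that wrapper.
# The test simply asserts that `pickle.dumps`/`pickle.loads` do not raise.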
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8_000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8_000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
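# Example invocation sketch (assumes the standard `transformers-cli` entry point
# that ships with the package):
#
#   transformers-cli download --cache-dir /tmp/hf-cache bert-base-uncased
#
# which populates the cache with the model weights and tokenizer files.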
| 177
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
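# Maps submodule name -> exported symbols; _LazyModule defers the heavy torch/TF imports
# until an attribute is first accessed.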
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
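# Pipeline flow: raw audio -> Whisper (speech_model/speech_processor) transcription,
# and the transcription is then used as the text prompt for a standard Stable Diffusion loop.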
class SpeechToImagePipeline(DiffusionPipeline):  # class name follows the diffusers community speech-to-image pipeline
"""simple docstring"""
    def __init__(self, speech_model, speech_processor, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor)
    def enable_attention_slicing(self, slice_size="auto"):
        '''simple docstring'''
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        '''simple docstring'''
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(self, audio, sampling_rate=16_000, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        '''simple docstring'''
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt")
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
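# Rough usage sketch (hedged: the custom_pipeline id below assumes the diffusers community
# "speech_to_image_diffusion" pipeline; component ids are illustrative, not from this file):
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="speech_to_image_diffusion",
#       speech_model=whisper_model, speech_processor=whisper_processor)
#   images = pipe(audio_array, sampling_rate=16_000).images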
| 534
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
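# Four-dot relative import: this module sits one level deeper than usual, under `models.deprecated`.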
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_van"] = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 462
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
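# Generates whole-word-masking reference files for Chinese text: LTP supplies word boundaries,
# the BERT tokenizer supplies the subtoken positions that the references index into.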
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4e_00 and cp <= 0x9f_ff)
or (cp >= 0x34_00 and cp <= 0x4d_bf) #
or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) #
or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) #
or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) #
or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) #
or (cp >= 0xf9_00 and cp <= 0xfa_ff)
or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) #
): #
return True
return False
def is_chinese(word):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
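# Greedy longest-match: mark BERT subtokens that continue an LTP-segmented Chinese word with "##".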
def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                token = token[2:]
                # save chinese tokens' pos
                if len(token) == 1 and _is_chinese_char(ord(token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
args = parser.parse_args()
main(args)
| 462
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
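# Builds a tiny ConvNeXt V2 config and random inputs so the shared model tests run quickly on CPU.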
class ConvNextVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        '''simple docstring'''
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels)

    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        return

    @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""")
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""")
    def test_model_common_attributes(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""")
    def test_feed_forward_chunking(self):
        '''simple docstring'''
        pass

    def test_training(self):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 65
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
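# fairseq parameter path -> HF UniSpeech parameter path; "*" is a placeholder for the encoder layer index.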
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
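# These keys live at the top level of the HF model (they are not nested under the `unispeech.` prefix).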
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """simple docstring"""
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group")
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """simple docstring"""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False)
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask)
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 136
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
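# The config tester below checks MobileNetV2-specific attributes on top of the common config tests.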
class MobileNetVaConfigTester(ConfigTester):
    '''simple docstring'''
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """tf_padding"""))
        self.parent.assertTrue(hasattr(config, """depth_multiplier"""))
class MobileNetVaModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act="relu6", last_hidden_size=1280, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range)

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))
        self.parent.assertEqual(
            result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': MobileNetVaModel,
            '''image-classification''': MobileNetVaForImageClassification,
            '''image-segmentation''': MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="""MobileNetV2 does not output attentions""")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""") if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""")
        model = model.to(torch_device)
        image_processor = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ], device=torch_device)
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 419
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
PRETRAINED_INIT_CONFIGURATION = {}
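# Fast (Rust-backed) MobileBERT tokenizer; it mirrors BertTokenizerFast since MobileBERT reuses the BERT vocabulary.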
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 419
| 1
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''')
            is_used = True
        elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.''']):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
                logger.info(f"Adapter proj layer norm weight was initialized from {full_name}.")
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
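

# --- Illustrative sketch (not part of the original script) ---
# Shows the weight tying performed by make_linear_from_emb: the LM head shares
# its storage with the embedding matrix. Wrapped in a hypothetical demo
# function so nothing executes on import.
def _demo_make_linear_from_emb():
    emb = nn.Embedding(10, 4)
    lm_head = make_linear_from_emb(emb)
    # same storage: mutating the embedding is visible through the head
    assert lm_head.weight.data_ptr() == emb.weight.data_ptr()
    return lm_head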
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path,
    decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=True,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 10
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096,
        activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True,
        scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
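

# --- Illustrative sketch (not part of the original file) ---
# Minimal usage of the config above; the chosen sizes are arbitrary and the
# demo function name is hypothetical.
def _demo_trocr_config():
    config = TrOCRConfig(d_model=256, decoder_layers=4, decoder_attention_heads=8)
    # `hidden_size` resolves to `d_model` through attribute_map
    assert config.hidden_size == 256
    return config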
| 10
| 1
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
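

# --- Illustrative extra case (not part of the original test file) ---
# Sanity check of the contract documented by the parametrized cases above:
# the ranges returned by _distribute_shards partition range(num_shards)
# without gaps or overlaps. Assumes pytest discovery; the test name is new.
def test_distribute_shards_partitions_all_shards():
    out = _distribute_shards(num_shards=10, max_num_jobs=3)
    covered = [i for r in out for i in r]
    assert covered == list(range(10))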
| 33
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
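

# --- Illustrative sketch (not part of the original file) ---
# Minimal usage of the scheduler above; tensor shapes and the step count are
# arbitrary, and the demo function name is hypothetical.
def _demo_score_sde_vp_step():
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    x = torch.randn(1, 3, 8, 8)
    score = torch.randn(1, 3, 8, 8)  # stands in for a model's score prediction
    t = scheduler.timesteps[0]
    x, x_mean = scheduler.step_pred(score, x, t)
    return x, x_mean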
| 33
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the underlying file has features {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
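

# --- Illustrative extra case (not part of the original test file) ---
# A self-contained round-trip sketch: write a small in-memory Dataset to JSON
# Lines and read the records back with the stdlib json module. Assumes pytest
# discovery; the test name and sample data are hypothetical.
def test_dataset_to_json_lines_roundtrip():
    dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    with io.BytesIO() as buffer:
        JsonDatasetWriter(dataset, buffer, lines=True).write()
        buffer.seek(0)
        records = [json.loads(line) for line in buffer]
    assert records == [{"col_1": "a", "col_2": 1}, {"col_1": "b", "col_2": 2}]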
| 202
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self, vocab_size=30522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768,
        n_head=12, d_head=64, d_inner=3072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1,
        activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-9,
        pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True,
        pool_q_only=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
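

# --- Illustrative sketch (not part of the original file) ---
# Minimal usage of the config above; block sizes are arbitrary and the demo
# function name is hypothetical.
def _demo_funnel_config():
    config = FunnelConfig(block_sizes=[2, 2], d_model=128, n_head=4)
    assert config.num_hidden_layers == 4  # sum of block_sizes
    assert config.num_blocks == 2
    return config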
| 714
|
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_mask_token_decoding(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
    def test_tokenizer_integration(self):
# fmt: off
SCREAMING_SNAKE_CASE : Tuple = {'input_ids': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE, model_name="google/bigbird-roberta-base", revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
| 220
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads, n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 31
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _A :
pass
| 402
| 0
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
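

# --- Illustrative sketch (not part of the original file) ---
# Example of the flag parsing above; the environment key and demo function
# name are hypothetical.
def _demo_parse_flag_from_env():
    os.environ["RUN_DEMO"] = "yes"
    try:
        assert parse_flag_from_env("RUN_DEMO", default=False)  # strtobool("yes") -> 1
    finally:
        del os.environ["RUN_DEMO"]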
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
lowerCamelCase : List[str] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
lowerCamelCase : str = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
lowerCamelCase : Tuple = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
lowerCamelCase : List[Any] = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case
def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case
def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case
def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case
def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case
def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case
def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case
def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case
def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case
def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401
            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case
def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case
def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case
def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case
def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls
    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass
class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
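# Usage sketch (illustrative, not in the original module): `for_all_test_methods`
# applies every given decorator to each `test_*` method of a class; the class
# below is a hypothetical example.
@for_all_test_methods(slow, require_faiss)
class _ExampleDecoratedTests(unittest.TestCase):
    def test_noop(self):
        self.assertTrue(True)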
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request
    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
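# Usage sketch (illustrative): simulating a hard network failure inside a test;
# the request below is an arbitrary example.
def _example_offline_usage():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.Session().send(requests.Request("GET", "https://huggingface.co").prepare())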
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
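# Usage sketch (illustrative): asserting that building a pyarrow table allocates
# Arrow memory; the table itself is an arbitrary example payload.
def _example_arrow_memory_check():
    with assert_arrow_memory_increases():
        table = pa.table({"col": list(range(1_000))})
        assert table.num_rows == 1_000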
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err
    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
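# Usage sketch (illustrative): running a short Python one-liner through the
# async subprocess helper defined above.
def _example_execute_subprocess():
    result = execute_subprocess_async([sys.executable, "-c", "print('hello')"], quiet=True, echo=False)
    assert result.stdout[0] == "hello"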
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker ("gw0" maps to 0)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    """Return a port number that is unique per pytest-xdist worker, usable for torch.distributed."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
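# Usage sketch (illustrative): a distributed test would hand the per-worker port
# to torch.distributed on the command line; the script name is hypothetical.
def _example_torchrun_cmd():
    return ["torchrun", f"--master_port={get_torch_dist_unique_port()}", "run_some_distributed_test.py"]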
| 710
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
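# Usage sketch (illustrative, not part of the original file): instantiating the
# config with its defaults and overriding a single field.
if __name__ == "__main__":
    config = ASTConfig(num_mel_bins=64)
    print(config.model_type, config.hidden_size, config.num_mel_bins)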
| 684
| 0
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
# fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
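# Usage sketch (illustrative, not part of the original tests): round-tripping
# text through the public checkpoint the slow tests above rely on.
if __name__ == "__main__":
    tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    ids = tokenizer.encode("Hello World!")
    print(ids, tokenizer.decode(ids))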
| 95
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top-k/top-p filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )
        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
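# Usage sketch (illustrative, not part of the original tests): with top_k=2 only
# the two largest logits survive; everything else is set to -inf.
def _example_top_k_filtering():
    toy_logits = tf.convert_to_tensor([[1.0, 2.0, 3.0, 4.0]], dtype=tf.float32)
    return tf_top_k_top_p_filtering(toy_logits, top_k=2, top_p=1.0)  # [[-inf, -inf, 3.0, 4.0]]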
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, like its imports above
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2
        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2
        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)
            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)
            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()
        # A fake model whose signature additionally accepts "foo"; generate() must
        # filter it out before calling the encoder.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)
        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))
        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)
        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 458
| 0
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    long_filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / long_filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(long_filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / long_filename)
    with lock2.acquire():
        with pytest.raises(Timeout):
            lock1.acquire(0)
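# Usage sketch (illustrative): the basic FileLock pattern exercised above; the
# lock path is an arbitrary example.
def _example_filelock(tmp_path):
    lock = FileLock(str(tmp_path / "example.lock"))
    with lock.acquire(timeout=1):
        pass  # critical section; a concurrent acquirer would raise Timeout after 1s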
| 701
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
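# Usage sketch (illustrative): with the `_LazyModule` in place, consumers import
# the public names directly and they resolve on first attribute access.
def _example_lazy_import():
    from transformers import LiltConfig, LiltModel
    return LiltModel(LiltConfig())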
| 99
| 0
|