| code (string, 82 - 53.2k chars) | code_codestyle (int64, 0 - 721) | style_context (string, 91 - 41.9k chars) | style_context_codestyle (int64, 0 - 699) | label (int64, 0 - 1) |
|---|---|---|---|---|
import enum
import warnings

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f"`args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwarg
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")
        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
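
# A minimal usage sketch for the pipelines above (the `t5-small` checkpoint is
# only illustrative; any seq2seq checkpoint works):

from transformers import pipeline

summarizer = pipeline("summarization", model="t5-small")  # illustrative checkpoint
print(summarizer("A very long article ...", max_length=20)[0]["summary_text"])

translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How are you?")[0]["translation_text"])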
| code_codestyle: 521 | style_context: |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
@staticmethod
def lowerCAmelCase (*snake_case_ : int , **snake_case_ : List[str] ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Any = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowerCAmelCase (self : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Dict ):
__a : Union[str, Any] = ObjectDetectionPipeline(model=snake_case_ , image_processor=snake_case_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowerCAmelCase (self : Tuple , snake_case_ : List[str] , snake_case_ : Any ):
__a : Any = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(snake_case_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
snake_case_ , {
'''score''': ANY(snake_case_ ),
'''label''': ANY(snake_case_ ),
'''box''': {'''xmin''': ANY(snake_case_ ), '''ymin''': ANY(snake_case_ ), '''xmax''': ANY(snake_case_ ), '''ymax''': ANY(snake_case_ )},
} , )
import datasets
__a : List[Any] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
__a : List[str] = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
__a : Optional[int] = object_detector(snake_case_ , threshold=0.0 )
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for outputs in batch_outputs:
self.assertGreater(len(snake_case_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
snake_case_ , {
'''score''': ANY(snake_case_ ),
'''label''': ANY(snake_case_ ),
'''box''': {'''xmin''': ANY(snake_case_ ), '''ymin''': ANY(snake_case_ ), '''xmax''': ANY(snake_case_ ), '''ymax''': ANY(snake_case_ )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def lowerCAmelCase (self : int ):
pass
@require_torch
def lowerCAmelCase (self : Tuple ):
__a : str = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
__a : int = AutoModelForObjectDetection.from_pretrained(snake_case_ )
__a : int = AutoFeatureExtractor.from_pretrained(snake_case_ )
__a : Union[str, Any] = ObjectDetectionPipeline(model=snake_case_ , feature_extractor=snake_case_ )
__a : List[str] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
] , )
__a : Dict = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
] , )
@require_torch
@slow
def lowerCAmelCase (self : List[Any] ):
__a : Optional[Any] = '''facebook/detr-resnet-50'''
__a : int = AutoModelForObjectDetection.from_pretrained(snake_case_ )
__a : str = AutoFeatureExtractor.from_pretrained(snake_case_ )
__a : Tuple = ObjectDetectionPipeline(model=snake_case_ , feature_extractor=snake_case_ )
__a : Union[str, Any] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
__a : Optional[Any] = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
@require_torch
@slow
def lowerCAmelCase (self : Optional[int] ):
__a : Any = '''facebook/detr-resnet-50'''
__a : Optional[Any] = pipeline('''object-detection''' , model=snake_case_ )
__a : Optional[int] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
__a : List[str] = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
@require_torch
@slow
def lowerCAmelCase (self : Union[str, Any] ):
__a : Tuple = 0.9985
__a : Tuple = '''facebook/detr-resnet-50'''
__a : int = pipeline('''object-detection''' , model=snake_case_ )
__a : Optional[Any] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=snake_case_ )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
@require_torch
@require_pytesseract
@slow
def lowerCAmelCase (self : List[str] ):
__a : Optional[int] = '''Narsil/layoutlmv3-finetuned-funsd'''
__a : Any = 0.9993
__a : Tuple = pipeline('''object-detection''' , model=snake_case_ , threshold=snake_case_ )
__a : Optional[Any] = object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
] , )
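
# As a quick orientation, a minimal sketch of the pipeline these tests exercise
# (the checkpoint and expected output format come from the slow tests above):

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
# each entry looks like:
# {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}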
| style_context_codestyle: 521 | label: 1 |
| code: |
"""simple docstring"""
def lowercase (snake_case__ : str ) -> str:
'''simple docstring'''
return " ".join(
"""""".join(word[::-1] ) if len(snake_case__ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| code_codestyle: 529 | style_context: |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
a = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def lowercase () -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = _ask_options(
"""In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowerCAmelCase = get_sagemaker_input()
else:
lowerCAmelCase = get_cluster_input()
return config
def lowercase (snake_case__ : List[str]=None ) -> int:
'''simple docstring'''
if subparsers is not None:
lowerCAmelCase = subparsers.add_parser("""config""" , description=snake_case__ )
else:
lowerCAmelCase = argparse.ArgumentParser("""Accelerate config command""" , description=snake_case__ )
parser.add_argument(
"""--config_file""" , default=snake_case__ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=snake_case__ )
return parser
def lowercase (snake_case__ : str ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = get_user_input()
if args.config_file is not None:
lowerCAmelCase = args.config_file
else:
if not os.path.isdir(snake_case__ ):
os.makedirs(snake_case__ )
lowerCAmelCase = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(snake_case__ )
else:
config.to_yaml_file(snake_case__ )
print(f'''accelerate configuration saved at {config_file}''' )
def lowercase () -> Optional[int]:
'''simple docstring'''
lowerCAmelCase = config_command_parser()
lowerCAmelCase = parser.parse_args()
config_command(snake_case__ )
if __name__ == "__main__":
main()
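
# A minimal driver sketch, equivalent to running
# `accelerate config --config_file my_config.yaml` from the command line:

parser = config_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])
config_command(args)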
| style_context_codestyle: 529 | label: 1 |
| code: |
import operator as op


def solve(post_fix):
    """Evaluate a space-separated postfix expression, printing each step in a table."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # push x onto the stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop the second operand off the stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop the first operand off the stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # evaluate the two values popped from the stack & push the result
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| code_codestyle: 62 | style_context: |
"""simple docstring"""
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
snake_case_ :Optional[Any] = 0
snake_case_ :Dict = 0
snake_case_ :Any = {}
def _a ( self , a ):
"""simple docstring"""
if vertex not in self.adjacency:
snake_case_ :str = {}
self.num_vertices += 1
def _a ( self , a , a , a ):
"""simple docstring"""
self.add_vertex(a )
self.add_vertex(a )
if head == tail:
return
snake_case_ :List[Any] = weight
snake_case_ :str = weight
def _a ( self ):
"""simple docstring"""
snake_case_ :Optional[int] = self.get_edges()
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Any = edge
edges.remove((tail, head, weight) )
for i in range(len(a ) ):
snake_case_ :Dict = list(edges[i] )
edges.sort(key=lambda a : e[2] )
for i in range(len(a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
snake_case_ :Tuple = edges[i][2] + 1
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Any = edge
snake_case_ :Dict = weight
snake_case_ :int = weight
def __str__( self ):
"""simple docstring"""
snake_case_ :List[Any] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
snake_case_ :List[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def _a ( self ):
"""simple docstring"""
snake_case_ :int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _a ( self ):
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def _a ( a=None , a=None ):
"""simple docstring"""
snake_case_ :Optional[Any] = Graph()
if vertices is None:
snake_case_ :int = []
if edges is None:
snake_case_ :Any = []
for vertex in vertices:
g.add_vertex(a )
for edge in edges:
g.add_edge(*a )
return g
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
snake_case_ :Dict = {}
snake_case_ :Optional[int] = {}
def __len__( self ):
"""simple docstring"""
return len(self.parent )
def _a ( self , a ):
"""simple docstring"""
if item in self.parent:
return self.find(a )
snake_case_ :Optional[Any] = item
snake_case_ :str = 0
return item
def _a ( self , a ):
"""simple docstring"""
if item not in self.parent:
return self.make_set(a )
if item != self.parent[item]:
snake_case_ :Optional[int] = self.find(self.parent[item] )
return self.parent[item]
def _a ( self , a , a ):
"""simple docstring"""
snake_case_ :Any = self.find(a )
snake_case_ :str = self.find(a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
snake_case_ :Union[str, Any] = roota
return roota
if self.rank[roota] < self.rank[roota]:
snake_case_ :Any = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
snake_case_ :Union[str, Any] = roota
return roota
return None
@staticmethod
def _a ( a ):
"""simple docstring"""
snake_case_ :Any = graph.num_vertices
snake_case_ :Any = Graph.UnionFind()
snake_case_ :Optional[Any] = []
while num_components > 1:
snake_case_ :List[Any] = {}
for vertex in graph.get_vertices():
snake_case_ :str = -1
snake_case_ :Tuple = graph.get_edges()
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Optional[int] = edge
edges.remove((tail, head, weight) )
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Optional[int] = edge
snake_case_ :List[Any] = union_find.find(a )
snake_case_ :Union[str, Any] = union_find.find(a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case_ :Optional[int] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case_ :Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
snake_case_ , snake_case_ , snake_case_ :List[Any] = cheap_edge[vertex]
if union_find.find(a ) != union_find.find(a ):
union_find.union(a , a )
mst_edges.append(cheap_edge[vertex] )
snake_case_ :List[Any] = num_components - 1
snake_case_ :Any = Graph.build(edges=a )
return mst
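
# A minimal sketch exercising the classes above (vertices and weights are illustrative):

g = Graph.build(vertices=[1, 2, 3, 4], edges=[[1, 2, 1], [2, 3, 2], [3, 4, 1], [4, 1, 3]])
g.distinct_weight()  # Boruvka's algorithm assumes distinct edge weights
mst = Graph.boruvka_mst(g)
print(mst)  # edges of a minimum spanning tree, one per line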
| style_context_codestyle: 584 | label: 0 |
| code: |
import argparse

custom_js_file = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the stable version and the version mapping in the doc's custom.js file."""
    with open(custom_js_file, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(custom_js_file, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
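
# For illustration, calling the updater directly (the version string is arbitrary):

update_custom_js("4.28.0")  # rewrites custom.js so the stable version reads "v4.28.0"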
| code_codestyle: 711 | style_context: |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class a__ :
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=10 , lowercase__=3 , lowercase__=2 , lowercase__=2 , lowercase__=True , lowercase__=True , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=10 , lowercase__=0.02 , lowercase__="divided_space_time" , lowercase__=None , ) -> Any:
__A = parent
__A = batch_size
__A = image_size
__A = num_channels
__A = patch_size
__A = num_frames
__A = is_training
__A = use_labels
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = attention_type
__A = initializer_range
__A = scope
__A = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__A = (image_size // patch_size) ** 2
__A = (num_frames) * self.num_patches_per_frame + 1
def _lowerCamelCase ( self ) -> Any:
__A = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.num_labels )
__A = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ) -> List[Any]:
__A = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__A = self.num_labels
return config
def _lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ) -> Dict:
__A = TimesformerModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__A = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
__A = TimesformerForVideoClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__A = model(lowercase__ )
# verify the logits shape
__A = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase__ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
__A = self.prepare_config_and_inputs()
__A , __A , __A = config_and_inputs
__A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
UpperCAmelCase_ : Optional[Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCAmelCase_ : Optional[Any] = (
{'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Dict = False
def _lowerCamelCase ( self ) -> int:
__A = TimesformerModelTester(self )
__A = ConfigTester(
self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 )
def _lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__=False ) -> int:
__A = copy.deepcopy(lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
__A = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase__ )
return inputs_dict
def _lowerCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def _lowerCamelCase ( self ) -> Optional[Any]:
pass
def _lowerCamelCase ( self ) -> int:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def _lowerCamelCase ( self ) -> int:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(lowercase__ )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase__ )
def _lowerCamelCase ( self ) -> List[Any]:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def _lowerCamelCase ( self ) -> Dict:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase__ )
@slow
def _lowerCamelCase ( self ) -> Optional[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = TimesformerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def _lowerCamelCase ( self ) -> str:
if not self.has_attentions:
pass
else:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = True
for model_class in self.all_model_classes:
__A = self.model_tester.seq_length
__A = self.model_tester.num_frames
__A = True
__A = False
__A = True
__A = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__A = outputs.attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A = True
__A = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__A = outputs.attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__A = len(lowercase__ )
# Check attention is always last and order is fine
__A = True
__A = True
__A = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(out_len + 1 , len(lowercase__ ) )
__A = outputs.attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _lowerCamelCase ( self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__A = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__A = outputs.hidden_states
__A = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase__ ) , lowercase__ )
__A = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def UpperCAmelCase ( ):
'''simple docstring'''
__A = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__A = np.load(lowerCAmelCase__ )
return list(lowerCAmelCase__ )
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ) -> int:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ) -> Optional[int]:
__A = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
lowercase__ )
__A = self.default_image_processor
__A = prepare_video()
__A = image_processor(video[:8] , return_tensors="pt" ).to(lowercase__ )
# forward pass
with torch.no_grad():
__A = model(**lowercase__ )
# verify the logits
__A = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase__ )
__A = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4 ) )
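
# A compact sketch of the inference path the integration test above drives
# (checkpoint, sample video, and preprocessing are all taken from the test):

import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

video_path = hf_hub_download(
    repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
)
video = list(np.load(video_path))

model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
inputs = processor(video[:8], return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 400), one score per Kinetics-400 class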
| style_context_codestyle: 205 | label: 0 |
| code: |
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
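
# A small usage sketch (assuming the class is exposed as `datasets.tasks.Summarization`):

template = Summarization(text_column="article", summary_column="highlights")
print(template.column_mapping)  # {'article': 'text', 'highlights': 'summary'}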
| code_codestyle: 94 | style_context: |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __snake_case ( __A ) -> Any:
lowercase : List[str] = os.path.join(args.tf_model_dir ,"""parameters.json""" )
lowercase : Any = json.loads(open(__A ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith(""".pt""" ):
lowercase : List[str] = args.output + """.pt"""
lowercase : Dict = OrderedDict()
with tf.device("""/CPU:0""" ):
lowercase : Any = tf.train.load_checkpoint(args.tf_model_dir )
lowercase : List[Any] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase : Optional[Any] = reader.get_tensor(__A ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
lowercase : str = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
lowercase : List[Any] = 8
lowercase : Optional[int] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int = torch.tensor(__A )
elif key_name.startswith("""model/moe""" ):
lowercase : Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
lowercase : Any = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
lowercase : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Tuple = torch.tensor(__A )
elif key_name.endswith("""/softmlp/kernel""" ):
lowercase : str = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
lowercase : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] = torch.tensor(__A )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
lowercase : Union[str, Any] = key_name[-9:-7]
for i in range(16 ):
lowercase : int = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
lowercase : str = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase : List[Any] = torch.tensor(__A )
elif key_name.startswith("""model/mlp""" ):
lowercase : Any = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
lowercase : Dict = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
lowercase : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int = torch.tensor(__A )
elif key_name.endswith("""/p1/bias""" ):
lowercase : Tuple = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
lowercase : Tuple = vnp.copy() # same because it is one dimensional
lowercase : Tuple = torch.tensor(__A )
elif key_name.endswith("""/p2/kernel""" ):
lowercase : int = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
lowercase : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] = torch.tensor(__A )
elif key_name.endswith("""/p2/bias""" ):
lowercase : Any = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
lowercase : str = vnp.copy() # same because it is one dimensional
lowercase : str = torch.tensor(__A )
elif key_name.startswith("""model/ln""" ):
lowercase : Union[str, Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase : Any = """model.blocks.%d.feed_forward.norm.bias""" % player
lowercase : List[str] = vnp.copy() # same because it is one dimensional
lowercase : Any = torch.tensor(__A )
elif key_name.endswith("""/g""" ):
lowercase : int = """model.blocks.%d.feed_forward.norm.weight""" % player
lowercase : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] = torch.tensor(__A )
elif key_name.startswith("""model/att""" ):
lowercase : Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
lowercase : Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase : Any = state[:, 0, :, :]
lowercase : List[Any] = state[:, 1, :, :]
lowercase : Optional[Any] = state[:, 2, :, :]
lowercase : Dict = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : List[str] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : str = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
lowercase : Optional[int] = torch.tensor(__A )
lowercase : int = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
lowercase : Optional[Any] = torch.tensor(__A )
lowercase : Tuple = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
lowercase : List[str] = torch.tensor(__A )
elif key_name.endswith("""/o/kernel""" ):
lowercase : Any = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
lowercase : List[Any] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] = torch.tensor(__A )
elif key_name.startswith("""model/an""" ):
lowercase : Tuple = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase : List[str] = """model.blocks.%d.self_attn.norm.bias""" % player
lowercase : List[str] = vnp.copy() # same because it is one dimensional
lowercase : int = torch.tensor(__A )
elif key_name.endswith("""/g""" ):
lowercase : Any = """model.blocks.%d.self_attn.norm.weight""" % player
lowercase : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowercase : int = torch.tensor(__A )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
lowercase : Any = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
lowercase : Optional[int] = """model.%s.weight""" % nlayer
lowercase : Any = vnp.copy() # same in embedded
lowercase : Dict = torch.tensor(__A )
if key_name.startswith("""model/wte""" ):
lowercase : Optional[Any] = """lm_head.weight"""
lowercase : int = vnp.copy() # same in embedded
lowercase : str = torch.tensor(__A )
elif key_name.startswith("""model/wob""" ):
lowercase : str = """final_logits_bias"""
lowercase : List[str] = vnp.copy() # same in embedded
lowercase : str = state.reshape((1, -1) )
lowercase : Tuple = torch.tensor(__A )
elif key_name == "model/dense/kernel":
lowercase : Dict = """model.last_project.weight"""
lowercase : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int = torch.tensor(__A )
elif key_name == "model/dense_1/bias":
lowercase : List[str] = """model.last_project.bias"""
lowercase : Dict = vnp.copy() # same because it is one dimensional
lowercase : List[str] = torch.tensor(__A )
torch.save(__A ,args.output )
if __name__ == "__main__":
lowerCAmelCase: Tuple =argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
lowerCAmelCase: Tuple =parser.parse_args()
convert_tf_gptsan_to_pt(args)
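
# Driving the converter above from Python (a sketch; the script filename is
# hypothetical, while --tf_model_dir and --output are the flags defined above):

import subprocess

subprocess.run(
    ["python", "convert_tf_gptsan_to_pt.py", "--tf_model_dir", "./tf_checkpoint", "--output", "./gptsan.pt"],
    check=True,
)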
| style_context_codestyle: 607 | label: 0 |
| code: |
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
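
# Outside the test harness, the same pipeline can be driven in a few lines
# (a sketch; `google/ddpm-cifar10-32` is the checkpoint used in the slow test):

import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
image = pipe(generator=torch.manual_seed(0), eta=0.0, num_inference_steps=50).images[0]
image.save("ddim_sample.png")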
| code_codestyle: 703 | style_context: |
def find_min(arr: list) -> int:
    """Return the minimum difference between the sums of two subsets of arr."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to exactly j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # exclude element i
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # include element i
    # take the largest achievable subset sum j <= s / 2
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
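
# A worked example: [1, 6, 11, 5] sums to 23, and the best split is
# {1, 5, 6} against {11}, giving |12 - 11| = 1:
assert find_min([1, 6, 11, 5]) == 1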
| style_context_codestyle: 70 | label: 0 |
| code: |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
lowerCAmelCase__ :Any = False
if num < 0:
lowerCAmelCase__ :Tuple = True
lowerCAmelCase__ :Optional[Any] = -num
lowerCAmelCase__ :list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_SCREAMING_SNAKE_CASE ) for e in binary )
return "0b" + "".join(str(_SCREAMING_SNAKE_CASE ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
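
# Quick checks for the converter above:
assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(40) == "0b101000"
assert decimal_to_binary(-40) == "-0b101000"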
| code_codestyle: 93 | style_context: |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
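
# For example, 28 is a perfect number: its proper divisors 1 + 2 + 4 + 7 + 14 sum back to 28:
assert sum_of_divisors(28) == 28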
| style_context_codestyle: 97 | label: 0 |
| code: |
import os
import unittest
import os
import unittest

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    BertTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
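

# Illustrative helper (hedged sketch, not part of the original test file): shows
# how the fast tokenizer's `offset_mapping`, exercised in the offsets test above,
# ties each WordPiece token back to a character span of the raw text. The
# "bert-base-uncased" checkpoint name is an assumption; any BERT checkpoint with
# a fast tokenizer behaves the same way.
def _demo_offset_mapping(text="A naïve sentence."):
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    encoding = tokenizer(text, return_offsets_mapping=True, add_special_tokens=False)
    tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"])
    # Pair every token with the exact slice of `text` it was produced from.
    return [(token, text[start:end]) for token, (start, end) in zip(tokens, encoding["offset_mapping"])]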
# =============================== next file ===============================
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                # compare against the black-formatted expected code, mirroring what the overwrite writes
                self.assertEqual(f.read(), black.format_str(expected, mode=black_mode))

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        link_unchanged_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
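

# Hedged illustration (not in the original test file): the marker format that
# `check_copies.is_copy_consistent` enforces. A class or function carrying such a
# comment must stay identical to the referenced source, modulo the declared
# `Bert->TestModel` rename; otherwise the utility reports it (or rewrites it when
# run with `overwrite=True`), which is exactly what the tests above exercise.
EXAMPLE_COPY_MARKER = (
    "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel"
)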
# =============================== next file ===============================
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase__ ( lowercase__ ):
lowercase__ : Dict = (DPMSolverSDEScheduler,)
lowercase__ : List[Any] = 10
def lowercase_ ( self , **UpperCamelCase__ ):
'''simple docstring'''
A__ = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**UpperCAmelCase__ )
return config
def lowercase_ ( self ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A__ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = model(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(UpperCAmelCase__ ) )
A__ = torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(prediction_type="v_prediction" )
A__ = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A__ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = model(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(UpperCAmelCase__ ) )
A__ = torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.to(UpperCAmelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
A__ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = model(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(UpperCAmelCase__ ) )
A__ = torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__ , use_karras_sigmas=UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.to(UpperCAmelCase__ ) * scheduler.init_noise_sigma
A__ = sample.to(UpperCAmelCase__ )
for t in scheduler.timesteps:
A__ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = model(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(UpperCAmelCase__ ) )
A__ = torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
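

# Minimal usage sketch (hedged; mirrors what SchedulerCommonTest drives above,
# with a random tensor standing in for a real denoising model's prediction, so
# the output is noise rather than an image).
def _demo_sde_denoising_loop(num_inference_steps=10):
    scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = torch.randn_like(model_input)  # stand-in for a UNet call
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample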
# =============================== next file ===============================
""" ESM model configuration"""

from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False

    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
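

# Hedged usage sketch (not part of the original module): nested plain dicts are
# promoted to EsmFoldConfig/TrunkConfig by the __init__/__post_init__ logic above.
def _demo_build_folding_config():
    config = EsmConfig(
        vocab_size=33,
        is_folding_model=True,
        esmfold_config={"trunk": {"num_blocks": 4}},  # dict -> EsmFoldConfig -> TrunkConfig
    )
    assert isinstance(config.esmfold_config, EsmFoldConfig)
    assert config.esmfold_config.trunk.num_blocks == 4
    return config.to_dict()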
# =============================== next file ===============================
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer decimal number to a binary string.
    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(2)
    '0b10'
    >>> decimal_to_binary(7)
    '0b111'
    >>> decimal_to_binary(35)
    '0b100011'
    >>> decimal_to_binary(-2)
    '-0b10'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
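
# Illustrative calls (hedged; consistent with the doctests above). The output
# format matches Python's built-in bin(), which can serve as a cross-check:
#     decimal_to_binary(40)  == "0b101000"  == bin(40)
#     decimal_to_binary(-40) == "-0b101000" == bin(-40)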
# =============================== next file ===============================
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"

_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"

_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
# =============================== next file ===============================
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""


class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """A list of logits processors/warpers that applies each of them in order to a `scores` tensor."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
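

# Hedged composition sketch (not in the original module): chaining a temperature
# warp with top-k filtering over a dummy batch of logits, the way generation code
# assembles these processors.
def _demo_processor_list():
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=50)]
    )
    input_ids = jnp.zeros((2, 5), dtype=jnp.int32)
    scores = jnp.zeros((2, 32000))  # batch of uniform logits
    # `cur_len` is the current generation step; processors that need extra
    # arguments receive them through the trailing **kwargs.
    return processors(input_ids, scores, cur_len=5)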
# =============================== next file ===============================
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ = BertJapaneseTokenizer
UpperCAmelCase_ = False
UpperCAmelCase_ = True
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
super().setUp()
_lowercase : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
_lowercase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowerCAmelCase_ ( self : Dict , UpperCamelCase : Dict ):
"""simple docstring"""
_lowercase : List[str] = '''こんにちは、世界。 \nこんばんは、世界。'''
_lowercase : List[str] = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase : Tuple ):
"""simple docstring"""
_lowercase , _lowercase : Optional[Any] = self.get_input_output_texts(UpperCamelCase )
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
_lowercase : Tuple = tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase )
return text, ids
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_lowercase : str = self.tokenizer_class(self.vocab_file )
_lowercase : List[str] = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(UpperCamelCase )
_lowercase : Any = '''こんにちは、世界。\nこんばんは、世界。'''
_lowercase : List[Any] = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowercase : Any = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCamelCase , '''wb''' ) as handle:
pickle.dump(UpperCamelCase , UpperCamelCase )
with open(UpperCamelCase , '''rb''' ) as handle:
_lowercase : int = pickle.load(UpperCamelCase )
_lowercase : Tuple = tokenizer_new.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_lowercase : str = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
try:
_lowercase : List[str] = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
try:
_lowercase : Any = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Tuple = MecabTokenizer(do_lower_case=UpperCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
try:
_lowercase : Tuple = MecabTokenizer(
do_lower_case=UpperCamelCase , normalize_text=UpperCamelCase , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_lowercase : Dict = MecabTokenizer(normalize_text=UpperCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_lowercase : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(UpperCamelCase )
_lowercase : Dict = '''こんにちは、世界。\nこんばんは、世界。'''
_lowercase : Dict = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowercase : Union[str, Any] = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCamelCase , '''wb''' ) as handle:
pickle.dump(UpperCamelCase , UpperCamelCase )
with open(UpperCamelCase , '''rb''' ) as handle:
_lowercase : Tuple = pickle.load(UpperCamelCase )
_lowercase : Tuple = tokenizer_new.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@require_sudachi
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_lowercase : Dict = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
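    # Note: the three assertions above show how Sudachi's split modes trade granularity
    # on the same input: mode "A" yields the shortest units (外国/人/参政/権), "B" an
    # intermediate split (外国人/参政権), and "C" the longest, named-entity-like unit
    # (外国人参政権).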
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
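    # Note: Juman++'s morphological analysis treats the kaomoji "m(_ _)m" as a single
    # token, which is what the assertion above relies on.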
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
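    # Illustration (not part of the test suite): WordpieceTokenizer implements greedy
    # longest-match-first lookup against the vocab, emitting "##"-prefixed pieces for
    # word-internal matches and unk_token when no prefix matches, which is why
    # "こんばんにちは" above collapses to a single "[UNK]". A minimal sketch of that
    # algorithm, assuming a plain dict vocab:
    #
    #     def greedy_wordpiece(word, vocab, unk="[UNK]"):
    #         pieces, start = [], 0
    #         while start < len(word):
    #             end = len(word)
    #             while end > start:  # try the longest candidate first
    #                 piece = ("##" if start > 0 else "") + word[start:end]
    #                 if piece in vocab:
    #                     pieces.append(piece)
    #                     break
    #                 end -= 1
    #             else:  # no piece matched: the whole word becomes unk
    #                 return [unk]
    #             start = end
    #         return pieces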
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
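    # Note: in this vocab [CLS] has id 2 and [SEP] id 3, so a single sequence encodes
    # as [CLS] + tokens + [SEP] and a pair as [CLS] + tokens_a + [SEP] + tokens_b + [SEP],
    # which is exactly what the two assertions check.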
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
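    # Illustration (hypothetical helper, not part of transformers): character-level
    # tokenization is just a per-character vocab lookup with an UNK fallback:
    #
    #     def char_tokenize(text, vocab, unk="[UNK]"):
    #         return [ch if ch in vocab else unk for ch in text]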
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
"""Project Euler problem 2: sum of the even-valued Fibonacci terms not exceeding four million."""


def solution(n: int = 4000000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
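# Worked example (illustrative): for n = 10 the Fibonacci terms considered are
# 0, 1, 1, 2, 3, 5, 8, so solution(10) == 2 + 8 == 10.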
"""Counting sort: a stable, non-comparison integer sorting algorithm."""


def counting_sort(collection):
    """Return a new, sorted list built with a stable counting sort."""
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors; now counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    """Sort the characters of a string with counting sort."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
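# Note (illustrative): counting sort runs in O(n + k) time and space, where
# k = max - min + 1, so it only pays off when the value range is small relative
# to n. Example: counting_sort([4, 1, 3, 1]) == [1, 1, 3, 4].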
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
        failed_table = []
        files2failed = {}
        for test in failed_tests:
            data = test[0].split("::")
            data[0] = data[0].split("/")[-1]
            if data[0] not in files2failed:
                files2failed[data[0]] = [data[1:]]
            else:
                files2failed[data[0]] += [data[1:]]
            failed_table.append(data)

        files = [test[0] for test in failed_table]
        individual_files = list(set(files))
        # Count number of instances in failed_tests
        table = []
        for file in individual_files:
            table.append([file, len(files2failed[file])])

        failed_table = tabulate(
            table,
            headers=["Test Location", "Num Failed"],
            tablefmt=hf_table_format,
            stralign="right",
        )
        message += f"\n```\n{failed_table}\n```"
        all_files2failed.append(files2failed)

    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
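# Note: each failing test file gets its own threaded reply (thread_ts=ts) under the
# main summary message, so the channel itself only shows one post per nightly run.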
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
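    # Illustration: for a dummy batch of two 8-token sequences the mask built above is
    # tensor([[1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0]]), i.e. every second
    # token is marked as a global-attention position for the ONNX export.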
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for combining CLIP image and text embeddings into the format expected by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
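# Shape sketch (illustrative, using the defaults above): with batch size B and
# clip_embeddings_dim 768, clip_extra_context_tokens_proj maps the (B, 768) image
# embedding to (B, 4 * cross_attention_dim); the reshape gives
# (B, cross_attention_dim, 4), the permute (B, 4, cross_attention_dim), and those
# 4 extra tokens are prepended to the projected text encoder hidden states.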
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
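# Note: _LazyModule defers the heavy torch/tokenizers imports until an attribute is
# first accessed at runtime; the TYPE_CHECKING branch above performs the real
# imports only for static type checkers and IDEs.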
"""simple docstring"""
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the max_n-th convergent of e."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
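# Worked example: the continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...],
# and the 10th convergent is 1457/536, so solution(10) == 1 + 4 + 5 + 7 == 17.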
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
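    # Note: the token ids, shape ids and pronunciation ids coincide here only because
    # setUp builds all three toy maps with identical indices; on the real RoCBert
    # vocab the three id sequences generally differ.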
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                # flag values follow the common BERT offsets test: only the offset
                # mapping and special tokens are requested
                tokens = tokenizer_r.encode_plus(
                    text,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
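    # Note: offset mappings are (start, end) character spans into the original string;
    # special tokens get the empty span (0, 0), and "##" continuation pieces point
    # inside the word they continue, as the expected_results above encode.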
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_a = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
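    # Note: to_dict() inlines the three sub-configs, so save_pretrained() can emit one
    # JSON file from which from_pretrained() rebuilds the nested vision/qformer/text
    # configs.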
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
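        # Illustrative follow-up (added; not part of the original test): mapping the
        # verified logits to a label would go through the config's id2label table:
        #   predicted_class = outputs.logits.argmax(-1).item()
        #   print(model.config.id2label[predicted_class])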
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure that inference works in half precision without any problem."""
        model = DeiTModel.from_pretrained(
            'facebook/deit-base-distilled-patch16-224', torch_dtype=torch.float16, device_map='auto')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of ``value``, or its derivative when ``deriv`` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the sigmoid output approaches ``expected`` / 100."""
    # Random weight in the interval [-99, 99]
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
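# Illustrative note (added; the seed call is an assumption for reproducibility):
# with expected = 50 the target activation is 0.5, which the sigmoid reaches at
# input 0, so the weight is pushed toward 0 and the output drifts toward 50 as
# the number of propagations grows, e.g.:
#   random.seed(0); print(forward_propagation(50, 100_000))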
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
    print(forward_propagation(expected, number_propagations))
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """A fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []

    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        # linear multistep coefficients, up to fourth order
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        # iPNDM does not rescale the model input
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
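# Minimal usage sketch (illustrative only; this module relies on relative
# imports, so the loop is shown as comments rather than an executable block):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(num_inference_steps=10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = denoising_model(sample, t)  # hypothetical model call
#       sample = scheduler.step(model_output, t, sample).prev_sample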
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` via the secant method, seeded with x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
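# Worked secant step (added for illustration): starting from x0 = 3 and
# x1 = 3.5, f(3) = 16 and f(3.5) = 30.875, so the next iterate is
#   x2 = 3.5 - 30.875 / ((30.875 - 16) / 0.5) ~= 2.4622,
# already moving toward the true root near 2.0946.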
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
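# Illustrative CLI invocation (added; the file names are placeholders):
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json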
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
UpperCamelCase_ = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of the last dimension of ``p``, treated as a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
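# Quick illustrative check (added; not part of the original script): a uniform
# distribution over four slots has entropy log(4) ~= 1.386:
#   entropy(torch.full((4,), 0.25))  # tensor(1.3863)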
def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and head importance scores (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) based on importance scores, as in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )
    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
return head_mask
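# Descriptive note (added; not from the original script): mask_heads repeatedly
# zeroes out the `masking_amount` fraction of least-important remaining heads
# and re-scores the model, stopping once the score drops below
# masking_threshold * original_score, and returns the last mask that still met
# the threshold.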
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove their weights) based on the head mask, as in Michel et al."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ), )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial divisor of ``num`` found by Pollard's rho, or None on failure."""
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
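# Illustrative checks (added, not part of the original module): even inputs
# short-circuit to 2, and 8051 = 83 * 97 is the classic textbook target, so a
# successful attempt returns one of its prime factors:
#   assert pollard_rho(100) == 2
#   assert pollard_rho(8051) in (83, 97, None)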
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Interpolate and evaluate the polynomial through the given points at x0 using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
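# Illustrative call (added): the points (1, 6), (2, 7), (3, 8), (4, 9) lie on
# y = x + 5, so evaluating the interpolant at x = 5 returns 10:
#   neville_interpolate([1, 2, 3, 4], [6, 7, 8, 9], 5)[0]  # 10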
if __name__ == "__main__":
import doctest
doctest.testmod()
def solution(n: int = 100) -> int:
    """Count distinct terms of a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
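# Illustrative check (added; matches the Project Euler 29 statement): for n = 5
# there are 15 distinct terms of a**b with 2 <= a, b <= 5:
#   assert solution(5) == 15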
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    """Constructs a LeViT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['shortest_edge'])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict['height'], size_dict['width']), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
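# Minimal usage sketch (illustrative; assumes Pillow is available and an RGB
# image on disk): the defaults run the full resize -> center-crop -> rescale ->
# normalize pipeline and yield a (1, 3, 224, 224) "pixel_values" array:
#   from PIL import Image
#   processor = LevitImageProcessor()
#   batch = processor(Image.open("cat.png").convert("RGB"), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224)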
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the contiguous subarray with the largest sum via divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float('-inf'), -1
    right_sum, max_right = float('-inf'), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken')
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, '\t\t', runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel('Number of Inputs')
    plt.ylabel('Time taken in seconds')
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        '''simple docstring'''
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        '''simple docstring'''
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__( self , parent , batch_size=3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=a , )
def _UpperCamelCase ( self : List[str] , a : List[str] , a : Dict , a : Optional[Any] , a : Tuple , a : Dict , a : List[str] , a : Optional[int] ):
"""simple docstring"""
__snake_case : Optional[int] =FalconModel(config=a )
model.to(a )
model.eval()
__snake_case : List[str] =model(a , attention_mask=a )
__snake_case : int =model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Dict , a : int , a : List[str] , a : Tuple , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : Optional[int] , a : List[str] , ):
"""simple docstring"""
__snake_case : str =True
__snake_case : Optional[Any] =FalconModel(a )
model.to(a )
model.eval()
__snake_case : Optional[int] =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
__snake_case : Union[str, Any] =model(
a , attention_mask=a , encoder_hidden_states=a , )
__snake_case : Any =model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[int] , a : int , a : Union[str, Any] , a : Optional[Any] , a : Optional[Any] , a : Dict , a : Optional[Any] , a : int , a : Union[str, Any] , a : Optional[int] , ):
"""simple docstring"""
__snake_case : Any =FalconForCausalLM(config=a )
model.to(a )
model.eval()
__snake_case : int =model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , a : List[Any] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Union[str, Any] , a : Any , a : str , ):
"""simple docstring"""
__snake_case : Tuple =True
__snake_case : str =True
__snake_case : int =FalconForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
__snake_case : Any =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
__snake_case : Union[str, Any] =outputs.past_key_values
# create hypothetical next tokens and extend to next_input_ids
__snake_case : int =ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : Optional[int] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
__snake_case : Optional[int] =torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : List[str] =torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case : Tuple =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['''hidden_states'''][0]
__snake_case : int =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['''hidden_states'''][0]
# select random slice
__snake_case : int =ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : Dict =output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : List[str] =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-3 ) )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case : Union[str, Any] =self.prepare_config_and_inputs()
(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case) =config_and_inputs
__snake_case : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
_a : str = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_a : int = (FalconForCausalLM,) if is_torch_available() else ()
_a : Any = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_a : List[str] = False
_a : str = False
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case : int =FalconModelTester(self )
__snake_case : Any =ConfigTester(self , config_class=a , hidden_size=3_7 )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
__snake_case , *__snake_case =self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
__snake_case : Tuple =alibi
self.model_tester.create_and_check_model(a , *a )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] =3
__snake_case : Union[str, Any] =input_dict['''input_ids''']
__snake_case : List[str] =input_ids.ne(1 ).to(a )
__snake_case : Any =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case : Any =FalconForSequenceClassification(a )
model.to(a )
model.eval()
__snake_case : int =model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case , __snake_case =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : str =3
__snake_case : Optional[int] ='''single_label_classification'''
__snake_case : Union[str, Any] =input_dict['''input_ids''']
__snake_case : Tuple =input_ids.ne(1 ).to(a )
__snake_case : Union[str, Any] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case : str =FalconForSequenceClassification(a )
model.to(a )
model.eval()
__snake_case : Dict =model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] =input_dict['''input_ids''']
__snake_case : Optional[Any] =FalconForCausalLM(a )
model.to(a )
model.eval()
__snake_case : List[Any] =model(a , use_cache=a )
__snake_case : int =input_ids.shape[0]
__snake_case : Dict =model._convert_to_rw_cache(result.past_key_values )
__snake_case : Union[str, Any] =model._convert_cache_to_standard_format(a , a )
for layer in range(len(a ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[Any] =3
__snake_case : List[Any] ='''multi_label_classification'''
__snake_case : int =input_dict['''input_ids''']
__snake_case : str =input_ids.ne(1 ).to(a )
__snake_case : Optional[int] =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__snake_case : Any =FalconForSequenceClassification(a )
model.to(a )
model.eval()
__snake_case : List[str] =model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
for model_class in self.all_generative_model_classes:
__snake_case , __snake_case =self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(a , '''use_cache''' ):
return
__snake_case : Union[str, Any] =model_class(a ).to(a )
if "use_cache" not in inputs:
__snake_case : List[str] =True
__snake_case : Union[str, Any] =model(**a )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__snake_case : Optional[Any] =(
getattr(a , '''decoder_layers''' , a )
or getattr(a , '''num_decoder_layers''' , a )
or config.num_hidden_layers
)
__snake_case : List[Any] =getattr(a , '''num_kv_heads''' , config.num_attention_heads )
__snake_case : int =getattr(a , '''d_model''' , config.hidden_size )
__snake_case : Any =embed_dim // num_attention_heads
__snake_case : List[Any] =outputs['''past_key_values''']
self.assertEqual(len(a ) , a )
__snake_case , __snake_case =inputs['''input_ids'''].shape
for i in range(a ):
if config.new_decoder_architecture:
__snake_case : Dict =config.num_attention_heads
elif config.multi_query:
__snake_case : int =1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class _lowercase ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
__snake_case : Union[str, Any] =AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
__snake_case : str =FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(a )
__snake_case : Any =tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(a )
__snake_case : List[str] =(
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
__snake_case : Dict =model.generate(**a , do_sample=a , max_new_tokens=1_9 )
__snake_case : Any =tokenizer.batch_decode(a )[0]
self.assertEqual(a , a )
@slow
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__snake_case : Optional[int] =AutoTokenizer.from_pretrained(a )
__snake_case : str =FalconForCausalLM.from_pretrained(a )
model.eval()
model.to(a )
__snake_case : int =tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(a )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**a , do_sample=a , max_new_tokens=4 )
model.generate(**a , do_sample=a , max_new_tokens=4 )
model.generate(**a , num_beams=2 , max_new_tokens=4 )
@slow
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__snake_case : List[str] =AutoTokenizer.from_pretrained(a )
__snake_case : Dict =FalconForCausalLM.from_pretrained(a )
model.eval()
model.to(device=a )
__snake_case : int =tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(a )
# Test results are the same with and without cache
__snake_case : Dict =model.generate(**a , do_sample=a , max_new_tokens=2_0 , use_cache=a )
__snake_case : List[Any] =model.generate(**a , do_sample=a , max_new_tokens=2_0 , use_cache=a )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
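# A standalone sketch of the past_key_values shape contract the tests
# above exercise: each layer caches a (key, value) pair shaped
# (batch, num_heads, seq_len, head_dim); all sizes here are illustrative.
import torch
_batch, _heads, _seq, _head_dim = 2, 4, 5, 8
_past = [
    (torch.zeros(_batch, _heads, _seq, _head_dim), torch.zeros(_batch, _heads, _seq, _head_dim))
    for _ in range(3)
]
assert len(_past) == 3
assert _past[0][0].shape == (_batch, _heads, _seq, _head_dim)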
| 497
|
"""simple docstring"""
from __future__ import annotations
def __lowercase ( a : int , a : int ) -> list[str]:
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not > number_of_bytes!''' )
__snake_case : List[str] =number_of_bytes // partitions
__snake_case : str =[]
for i in range(a ):
__snake_case : Optional[Any] =i * bytes_per_partition + 1
__snake_case : Any =(
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
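# A minimal standalone walk-through of the partitioning rule above with
# 100 bytes split into 4 partitions; the names and values are illustrative:
_number_of_bytes, _partitions = 100, 4
_bytes_per_partition = _number_of_bytes // _partitions
_allocation = [
    f"{i * _bytes_per_partition + 1}-"
    f"{_number_of_bytes if i == _partitions - 1 else (i + 1) * _bytes_per_partition}"
    for i in range(_partitions)
]
assert _allocation == ["1-25", "26-50", "51-75", "76-100"]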
| 497
| 1
|
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__lowerCAmelCase = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
__lowerCAmelCase = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
__lowerCAmelCase = 'zero2'
__lowerCAmelCase = 'zero3'
__lowerCAmelCase = [ZEROa, ZEROa]
def a ( a , a , a ) ->Dict:
'''simple docstring'''
    SCREAMING_SNAKE_CASE = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
return F"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
__lowerCAmelCase = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCamelCase ( __lowerCamelCase ):
@parameterized.expand(lowercase , name_func=lowercase )
def snake_case__ ( self :Optional[Any] , lowercase :str , lowercase :Dict ) -> Optional[Any]:
"""simple docstring"""
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def snake_case__ ( self :Any , lowercase :Tuple , lowercase :int ) -> List[Any]:
"""simple docstring"""
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def snake_case__ ( self :Dict , lowercase :List[Any] , lowercase :List[Any] ) -> List[Any]:
"""simple docstring"""
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def snake_case__ ( self :Optional[Any] , lowercase :Tuple , lowercase :List[Any] ) -> Any:
"""simple docstring"""
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def snake_case__ ( self :int , lowercase :int ) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self :Optional[Any] , lowercase :str , lowercase :str , lowercase :int = 1_0 , lowercase :bool = True , lowercase :bool = True , lowercase :bool = True , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = models[model]
SCREAMING_SNAKE_CASE = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def snake_case__ ( self :Optional[int] , lowercase :str , lowercase :str , lowercase :int = 1_0 , lowercase :int = 1 , lowercase :bool = True , lowercase :bool = True , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir('''./xxx''' , after=lowercase )
SCREAMING_SNAKE_CASE = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
SCREAMING_SNAKE_CASE = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
SCREAMING_SNAKE_CASE = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
SCREAMING_SNAKE_CASE = self.get_launcher(lowercase )
SCREAMING_SNAKE_CASE = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def snake_case__ ( self :Tuple , lowercase :List[Any]=False ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(2 , get_gpu_count() ) if distributed else 1
return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 201
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ):
@register_to_config
def __init__( self :Tuple , *,
lowercase :int = 4 , lowercase :int = 7_6_8 , lowercase :int , lowercase :Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(lowercase ) )
# parameters for additional clip time embeddings
SCREAMING_SNAKE_CASE = nn.Linear(lowercase , lowercase )
SCREAMING_SNAKE_CASE = nn.Linear(lowercase , lowercase )
# parameters for encoder hidden states
SCREAMING_SNAKE_CASE = clip_extra_context_tokens
SCREAMING_SNAKE_CASE = nn.Linear(
lowercase , self.clip_extra_context_tokens * cross_attention_dim )
SCREAMING_SNAKE_CASE = nn.Linear(lowercase , lowercase )
SCREAMING_SNAKE_CASE = nn.LayerNorm(lowercase )
def snake_case__ ( self :Union[str, Any] , *, lowercase :Optional[int] , lowercase :Union[str, Any] , lowercase :Union[str, Any] , lowercase :Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
SCREAMING_SNAKE_CASE = image_embeddings.shape[0]
SCREAMING_SNAKE_CASE = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
SCREAMING_SNAKE_CASE = classifier_free_guidance_embeddings.expand(
lowercase , -1 )
SCREAMING_SNAKE_CASE = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
SCREAMING_SNAKE_CASE = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
SCREAMING_SNAKE_CASE = self.embedding_proj(lowercase )
SCREAMING_SNAKE_CASE = self.clip_image_embeddings_project_to_time_embeddings(lowercase )
SCREAMING_SNAKE_CASE = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
SCREAMING_SNAKE_CASE = self.clip_extra_context_tokens_proj(lowercase )
SCREAMING_SNAKE_CASE = clip_extra_context_tokens.reshape(lowercase , -1 , self.clip_extra_context_tokens )
SCREAMING_SNAKE_CASE = clip_extra_context_tokens.permute(0 , 2 , 1 )
SCREAMING_SNAKE_CASE = self.encoder_hidden_states_proj(lowercase )
SCREAMING_SNAKE_CASE = self.text_encoder_hidden_states_norm(lowercase )
SCREAMING_SNAKE_CASE = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
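# A standalone sketch of the classifier-free-guidance batching trick used
# above: one learned "unconditional" row is expanded to the batch size and
# stacked in front of the real image embeddings (sizes are illustrative):
_image_embeddings = torch.randn(2, 768)
_uncond = torch.zeros(1, 768)  # stands in for the learned CFG embedding
_stacked = torch.cat([_uncond.expand(2, -1), _image_embeddings], dim=0)
assert _stacked.shape == (4, 768)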
| 201
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__lowerCamelCase = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
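# A minimal standalone sketch of the deferred-import idea behind
# _LazyModule, using a PEP 562-style module __getattr__; the "math"
# target below is a stand-in, not part of the transformers API:
import importlib
import types

def _make_lazy_module(name, attr_to_module):
    mod = types.ModuleType(name)
    def __getattr__(attr):  # invoked only when normal attribute lookup fails
        target = attr_to_module.get(attr)
        if target is None:
            raise AttributeError(attr)
        return getattr(importlib.import_module(target), attr)
    mod.__getattr__ = __getattr__
    return mod

_lazy = _make_lazy_module("demo", {"sqrt": "math"})
assert _lazy.sqrt(9) == 3.0  # "math" is only imported on first access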
| 701
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a__ ( unittest.TestCase ):
def UpperCAmelCase( self : int ):
a_ : Any = 0
def UpperCAmelCase( self : Dict ):
a_ : List[str] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Any = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
a_ : Tuple = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
a_ : Union[str, Any] = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : List[Any] ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Dict = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
a_ : List[str] = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
a_ : int = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : int = CLIPConfig()
# Create a dummy config file with image_processor_type
a_ : Tuple = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
a_ : Tuple = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a_ : Optional[int] = AutoImageProcessor.from_pretrained(lowerCamelCase_ ).to_dict()
config_dict.pop("""image_processor_type""" )
a_ : Tuple = CLIPImageProcessor(**lowerCamelCase_ )
# save in new folder
model_config.save_pretrained(lowerCamelCase_ )
config.save_pretrained(lowerCamelCase_ )
a_ : Optional[int] = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
# make sure private variable is not incorrectly saved
a_ : str = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Union[str, Any] = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
a_ : Optional[int] = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : int ):
with self.assertRaisesRegex(
lowerCamelCase_ , """clip-base is not a local folder and is not a valid model identifier""" ):
a_ : Any = AutoImageProcessor.from_pretrained("""clip-base""" )
def UpperCAmelCase( self : Optional[Any] ):
with self.assertRaisesRegex(
lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
a_ : Dict = AutoImageProcessor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )
def UpperCAmelCase( self : Dict ):
with self.assertRaisesRegex(
lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
a_ : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCAmelCase( self : str ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase_ ):
a_ : int = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
a_ : Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
a_ : int = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase_ )
a_ : int = AutoImageProcessor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def UpperCAmelCase( self : List[str] ):
try:
AutoConfig.register("""custom""" , lowerCamelCase_ )
AutoImageProcessor.register(lowerCamelCase_ , lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoImageProcessor.register(lowerCamelCase_ , lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Dict = Path(lowerCamelCase_ ) / """preprocessor_config.json"""
a_ : Union[str, Any] = Path(lowerCamelCase_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(lowerCamelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCamelCase_ , """w""" ) )
a_ : str = CustomImageProcessor.from_pretrained(lowerCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase_ )
a_ : Any = AutoImageProcessor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase( self : Union[str, Any] ):
class a__ ( lowerCAmelCase_ ):
lowerCamelCase__: List[Any] = True
try:
AutoConfig.register("""custom""" , lowerCamelCase_ )
AutoImageProcessor.register(lowerCamelCase_ , lowerCamelCase_ )
# If remote code is not set, the default is to use local
a_ : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
a_ : Optional[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
a_ : Dict = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
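# A standalone sketch of the register-then-clean-up pattern the tests
# above rely on: extend a registry inside try/finally so one test cannot
# leak its registration into the next (the dict is a stand-in mapping):
_REGISTRY = {}
try:
    _REGISTRY["custom"] = object()
    assert "custom" in _REGISTRY
finally:
    _REGISTRY.pop("custom", None)
assert "custom" not in _REGISTRY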
| 478
| 0
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowerCamelCase ( _UpperCamelCase : dict ) -> tuple:
'''simple docstring'''
return (data["data"], data["target"])
def lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
__UpperCAmelCase : List[Any] = XGBRegressor(verbosity=0 , random_state=4_2 )
xgb.fit(_UpperCamelCase , _UpperCamelCase )
# Predict target for test data
__UpperCAmelCase : int = xgb.predict(_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = predictions.reshape(len(_UpperCamelCase ) , 1 )
return predictions
def lowerCamelCase ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : str = fetch_california_housing()
__UpperCAmelCase ,__UpperCAmelCase = data_handling(_UpperCamelCase )
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = train_test_split(
_UpperCamelCase , _UpperCamelCase , test_size=0.25 , random_state=1 )
__UpperCAmelCase : str = xgboost(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(_UpperCamelCase , _UpperCamelCase )}''' )
print(f'''Mean Square Error : {mean_squared_error(_UpperCamelCase , _UpperCamelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 139
|
"""simple docstring"""
from math import pow
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , ) -> tuple[int, int]:
'''simple docstring'''
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
__UpperCAmelCase : List[str] = int(pow(_UpperCamelCase , _UpperCamelCase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
__UpperCAmelCase ,__UpperCAmelCase = backtrack(
_UpperCamelCase , _UpperCamelCase , current_number + 1 , _UpperCamelCase , _UpperCamelCase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
__UpperCAmelCase ,__UpperCAmelCase = backtrack(
_UpperCamelCase , _UpperCamelCase , current_number + 1 , _UpperCamelCase , _UpperCamelCase )
return current_sum, solutions_count
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> int:
'''simple docstring'''
if not (1 <= needed_sum <= 1_0_0_0 and 2 <= power <= 1_0):
raise ValueError(
"""Invalid input\n"""
"""needed_sum must be between 1 and 1000, power between 2 and 10.""" )
return backtrack(_UpperCamelCase , _UpperCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
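# A standalone brute-force cross-check of the backtracking count above,
# enumerating subsets of distinct bases directly (small inputs only, and
# the helper name is illustrative):
from itertools import combinations

def _brute_force_count(needed_sum, power):
    bases = range(1, int(needed_sum ** (1 / power)) + 1)
    return sum(
        1
        for r in range(1, len(bases) + 1)
        for combo in combinations(bases, r)
        if sum(b**power for b in combo) == needed_sum
    )

assert _brute_force_count(13, 2) == 1  # 13 == 2**2 + 3**2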
| 139
| 1
|
'''simple docstring'''
_UpperCamelCase : Optional[int] = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
_UpperCamelCase : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def __UpperCAmelCase ( A : str ) -> str:
UpperCAmelCase_ : Optional[int] = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def __UpperCAmelCase ( A : str ) -> str:
if set(A ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
UpperCAmelCase_ : str = ''''''
for word in coded.split():
while len(A ) != 0:
decoded += decode_dict[word[:5]]
UpperCAmelCase_ : Tuple = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
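# A toy, self-contained illustration of the 5-character round trip; the
# two-letter tables below stand in for the full dictionaries above:
_toy_encode = {"a": "AAAAA", "b": "AAAAB", " ": " "}
_toy_decode = {v: k for k, v in _toy_encode.items()}
_coded = "".join(_toy_encode[ch] for ch in "ab ab")
_decoded = " ".join(
    "".join(_toy_decode[w[i : i + 5]] for i in range(0, len(w), 5))
    for w in _coded.split()
)
assert _decoded == "ab ab"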
| 711
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __UpperCAmelCase ( A : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_ : List[Any] = job['''started_at''']
UpperCAmelCase_ : List[Any] = job['''completed_at''']
UpperCAmelCase_ : Optional[Any] = date_parser.parse(A )
UpperCAmelCase_ : List[Any] = date_parser.parse(A )
UpperCAmelCase_ : Any = round((end_datetime - start_datetime).total_seconds() / 60.0 )
UpperCAmelCase_ : Any = start
UpperCAmelCase_ : Dict = end
UpperCAmelCase_ : Tuple = duration_in_min
return job_info
def __UpperCAmelCase ( A : int , A : int=None ) -> List[str]:
UpperCAmelCase_ : Tuple = None
if token is not None:
UpperCAmelCase_ : Any = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"Bearer {token}"}
UpperCAmelCase_ : Union[str, Any] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
UpperCAmelCase_ : Any = requests.get(A , headers=A ).json()
UpperCAmelCase_ : Any = {}
try:
job_time.update({job['''name''']: extract_time_from_single_job(A ) for job in result['''jobs''']} )
UpperCAmelCase_ : Union[str, Any] = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(A ):
UpperCAmelCase_ : List[str] = requests.get(url + F"&page={i + 2}" , headers=A ).json()
job_time.update({job['''name''']: extract_time_from_single_job(A ) for job in result['''jobs''']} )
return job_time
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
if __name__ == "__main__":
_UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
_UpperCamelCase : str = parser.parse_args()
_UpperCamelCase : str = get_job_time(args.workflow_run_id)
_UpperCamelCase : Optional[int] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v["duration"]}''')
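# A quick standalone check of the pagination arithmetic used above: with
# per_page=100 and, say, 250 jobs, the first request is followed by
# ceil((250 - 100) / 100) == 2 further pages:
assert math.ceil((250 - 100) / 100) == 2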
| 216
| 0
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger()
@dataclass
class lowerCAmelCase :
'''simple docstring'''
snake_case = 42
snake_case = field(default_factory=__UpperCamelCase )
snake_case = field(default_factory=__UpperCamelCase )
def lowerCamelCase__ ( self : Tuple , __snake_case : int , __snake_case : Tensor , __snake_case : Tensor ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(__snake_case , nn.Conv2d ) or isinstance(__snake_case , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(__snake_case )
def __call__( self : Optional[Any] , __snake_case : Tensor ) -> Optional[int]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__snake_case )
[x.remove() for x in self.handles]
return self
@property
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
return list(filter(lambda __snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
snake_case = 42
snake_case = 42
snake_case = 0
snake_case = field(default_factory=__UpperCamelCase )
snake_case = field(default_factory=__UpperCamelCase )
def __call__( self : List[str] , __snake_case : Tensor ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = Tracker(self.dest )(__snake_case ).parametrized
lowerCamelCase = Tracker(self.src )(__snake_case ).parametrized
lowerCamelCase = list(filter(lambda __snake_case : type(__snake_case ) not in self.src_skip , __snake_case ) )
lowerCamelCase = list(filter(lambda __snake_case : type(__snake_case ) not in self.dest_skip , __snake_case ) )
if len(__snake_case ) != len(__snake_case ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(__snake_case )} operations while'''
F''' destination module has {len(__snake_case )}.''' )
for dest_m, src_m in zip(__snake_case , __snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transferred from={src_m} to={dest_m}''' )
def a_ ( UpperCamelCase_ : str , UpperCamelCase_ : ResNetConfig , UpperCamelCase_ : Path , UpperCamelCase_ : bool = True ) -> Tuple:
"""simple docstring"""
print(f'''Converting {name}...''' )
with torch.no_grad():
lowerCamelCase = timm.create_model(UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval()
lowerCamelCase = ResNetForImageClassification(UpperCamelCase_ ).eval()
lowerCamelCase = ModuleTransfer(src=UpperCamelCase_ , dest=UpperCamelCase_ )
lowerCamelCase = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(UpperCamelCase_ )
assert torch.allclose(from_model(UpperCamelCase_ ) , our_model(UpperCamelCase_ ).logits ), "The model logits don't match the original one."
lowerCamelCase = f'''resnet{"-".join(name.split("resnet" ) )}'''
print(UpperCamelCase_ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCamelCase_ , )
# we can use the convnext one
lowerCamelCase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCamelCase_ , )
print(f'''Pushed {checkpoint_name}''' )
def a_ ( UpperCamelCase_ : Path , UpperCamelCase_ : str = None , UpperCamelCase_ : bool = True ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase = 'imagenet-1k-id2label.json'
lowerCamelCase = 1_0_0_0
lowerCamelCase = (1, num_labels)
lowerCamelCase = 'huggingface/label-files'
lowerCamelCase = num_labels
lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type='dataset' ) , 'r' ) )
lowerCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
lowerCamelCase = idalabel
lowerCamelCase = {v: k for k, v in idalabel.items()}
lowerCamelCase = partial(UpperCamelCase_ , num_labels=UpperCamelCase_ , idalabel=UpperCamelCase_ , labelaid=UpperCamelCase_ )
lowerCamelCase = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(UpperCamelCase_ , names_to_config[model_name] , UpperCamelCase_ , UpperCamelCase_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
_lowerCAmelCase : List[str] = parser.parse_args()
_lowerCAmelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
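# A standalone sketch of the forward-hook tracing idea behind Tracker:
# hook every leaf module, run one forward pass, and record the execution
# order (the tiny network below is illustrative):
_net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
_seen = []
_handles = [
    m.register_forward_hook(lambda mod, inp, out: _seen.append(type(mod).__name__))
    for m in _net.modules()
    if len(list(m.children())) == 0
]
_net(torch.randn(1, 3, 8, 8))
[h.remove() for h in _handles]
assert _seen == ["Conv2d", "BatchNorm2d", "ReLU"]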
| 246
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ ( self : int ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : int ) -> str:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloat16 , )
lowerCamelCase = 'A painting of a squirrel eating a burger'
lowerCamelCase = jax.device_count()
lowerCamelCase = num_samples * [prompt]
lowerCamelCase = sd_pipe.prepare_inputs(__snake_case )
lowerCamelCase = replicate(__snake_case )
lowerCamelCase = shard(__snake_case )
lowerCamelCase = jax.random.PRNGKey(0 )
lowerCamelCase = jax.random.split(__snake_case , jax.device_count() )
lowerCamelCase = sd_pipe(__snake_case , __snake_case , __snake_case , num_inference_steps=25 , jit=__snake_case )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase = images[0, 253:256, 253:256, -1]
lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = 'stabilityai/stable-diffusion-2'
lowerCamelCase , lowerCamelCase = FlaxDPMSolverMultistepScheduler.from_pretrained(__snake_case , subfolder='scheduler' )
lowerCamelCase , lowerCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
__snake_case , scheduler=__snake_case , revision='bf16' , dtype=jnp.bfloat16 , )
lowerCamelCase = scheduler_params
lowerCamelCase = 'A painting of a squirrel eating a burger'
lowerCamelCase = jax.device_count()
lowerCamelCase = num_samples * [prompt]
lowerCamelCase = sd_pipe.prepare_inputs(__snake_case )
lowerCamelCase = replicate(__snake_case )
lowerCamelCase = shard(__snake_case )
lowerCamelCase = jax.random.PRNGKey(0 )
lowerCamelCase = jax.random.split(__snake_case , jax.device_count() )
lowerCamelCase = sd_pipe(__snake_case , __snake_case , __snake_case , num_inference_steps=25 , jit=__snake_case )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase = images[0, 253:256, 253:256, -1]
lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 246
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :List[str] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Dict ='''openai-gpt'''
snake_case__ : Any ={
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self: List[Any] , __a: str=40_478 , __a: str=512 , __a: Any=768 , __a: Any=12 , __a: Any=12 , __a: Optional[Any]="gelu" , __a: Optional[int]=0.1 , __a: List[str]=0.1 , __a: Any=0.1 , __a: Dict=1e-5 , __a: Optional[int]=0.02 , __a: List[Any]="cls_index" , __a: List[str]=True , __a: Dict=None , __a: Optional[Any]=True , __a: Any=0.1 , **__a: Tuple , )-> Dict:
lowerCamelCase : Any = vocab_size
lowerCamelCase : List[Any] = n_positions
lowerCamelCase : List[Any] = n_embd
lowerCamelCase : Optional[int] = n_layer
lowerCamelCase : Optional[Any] = n_head
lowerCamelCase : List[Any] = afn
lowerCamelCase : List[Any] = resid_pdrop
lowerCamelCase : int = embd_pdrop
lowerCamelCase : int = attn_pdrop
lowerCamelCase : List[Any] = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Tuple = summary_type
lowerCamelCase : Union[str, Any] = summary_use_proj
lowerCamelCase : Optional[Any] = summary_activation
lowerCamelCase : Dict = summary_first_dropout
lowerCamelCase : Any = summary_proj_to_labels
super().__init__(**__a )
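# A standalone sketch of the aliasing that attribute_map provides:
# canonical names resolve to the GPT-style fields; this toy class is
# illustrative, not the transformers implementation:
class _AliasedConfig:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self):
        self.n_embd = 768

    def __getattr__(self, name):
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

assert _AliasedConfig().hidden_size == 768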
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Optional[Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
from collections.abc import Generator
from math import sin
def a ( __UpperCAmelCase : bytes ) -> bytes:
if len(__UpperCAmelCase ) != 3_2:
raise ValueError("""Input must be of length 32""" )
__magic_name__: Any = B""""""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def a ( __UpperCAmelCase : int ) -> bytes:
if i < 0:
raise ValueError("""Input must be non-negative""" )
__magic_name__: Any = format(__UpperCAmelCase , """08x""" )[-8:]
__magic_name__: List[str] = B""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def a ( __UpperCAmelCase : bytes ) -> bytes:
__magic_name__: List[Any] = B""""""
for char in message:
bit_string += format(__UpperCAmelCase , """08b""" ).encode("""utf-8""" )
__magic_name__: int = format(len(__UpperCAmelCase ) , """064b""" ).encode("""utf-8""" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__UpperCAmelCase ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def a ( __UpperCAmelCase : bytes ) -> Generator[list[int], None, None]:
if len(__UpperCAmelCase ) % 5_1_2 != 0:
raise ValueError("""Input must have length that's a multiple of 512""" )
for pos in range(0 , len(__UpperCAmelCase ) , 5_1_2 ):
__magic_name__: Union[str, Any] = bit_string[pos : pos + 5_1_2]
__magic_name__: Optional[int] = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def a ( __UpperCAmelCase : int ) -> int:
if i < 0:
raise ValueError("""Input must be non-negative""" )
__magic_name__: Dict = format(__UpperCAmelCase , """032b""" )
__magic_name__: List[str] = """"""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__UpperCAmelCase , 2 )
def a ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return (a + b) % 2**3_2
def a ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
if i < 0:
raise ValueError("""Input must be non-negative""" )
if shift < 0:
raise ValueError("""Shift must be non-negative""" )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def a ( __UpperCAmelCase : bytes ) -> bytes:
__magic_name__: str = preprocess(__UpperCAmelCase )
__magic_name__: Union[str, Any] = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
__magic_name__: Tuple = 0x6_7_4_5_2_3_0_1
__magic_name__: Dict = 0xE_F_C_D_A_B_8_9
__magic_name__: Any = 0x9_8_B_A_D_C_F_E
__magic_name__: List[str] = 0x1_0_3_2_5_4_7_6
__magic_name__: Any = [
    7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2,
    5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0,
    4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3,
    6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__UpperCAmelCase ):
__magic_name__: Optional[int] = aa
__magic_name__: Any = ba
__magic_name__: Optional[int] = ca
__magic_name__: List[Any] = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__magic_name__: Optional[Any] = d ^ (b & (c ^ d))
__magic_name__: List[str] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__magic_name__: Union[str, Any] = c ^ (d & (b ^ c))
__magic_name__: List[Any] = (5 * i + 1) % 1_6
elif i <= 4_7:
__magic_name__: str = b ^ c ^ d
__magic_name__: Tuple = (3 * i + 5) % 1_6
else:
__magic_name__: Union[str, Any] = c ^ (b | not_aa(__UpperCAmelCase ))
__magic_name__: Optional[int] = (7 * i) % 1_6
__magic_name__: Tuple = (f + a + added_consts[i] + block_words[g]) % 2**3_2
__magic_name__: Optional[int] = d
__magic_name__: Optional[int] = c
__magic_name__: List[Any] = b
__magic_name__: Optional[Any] = sum_aa(__UpperCAmelCase , left_rotate_aa(__UpperCAmelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
__magic_name__: Optional[Any] = sum_aa(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__: List[str] = sum_aa(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__: Any = sum_aa(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__: int = sum_aa(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__: Any = reformat_hex(__UpperCAmelCase ) + reformat_hex(__UpperCAmelCase ) + reformat_hex(__UpperCAmelCase ) + reformat_hex(__UpperCAmelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
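# The pure-Python routine above should agree with the reference digest in
# the standard library; b"hello" is a well-known MD5 test vector:
import hashlib
assert hashlib.md5(b"hello").hexdigest() == "5d41402abc4b2a76b9719d911017c592"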
| 96
|
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
__lowerCamelCase = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def a ( __UpperCAmelCase : Any , __UpperCAmelCase : Any ) -> Any:
__magic_name__: List[str] = {
"""word_embeddings.weight""": """word_embeddings.weight""",
"""word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""",
"""word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""",
"""weight""": """ln_f.weight""",
"""bias""": """ln_f.bias""",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
__magic_name__: Any = int(re.match(R""".*layer_(\d*).*""" , __UpperCAmelCase )[1] )
layer_number -= 3
return f'h.{layer_number}.' + key
def a ( __UpperCAmelCase : List[str] ) -> Union[str, Any]:
if dtype == torch.bool:
return 1 / 8
__magic_name__: Optional[int] = re.search(R"""[^\d](\d+)$""" , str(__UpperCAmelCase ) )
if bit_search is None:
raise ValueError(f'`dtype` is not a valid dtype: {dtype}.' )
__magic_name__: Dict = int(bit_search.groups()[0] )
return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                tp_file = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, tp_file), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for file in file_names:
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                tp_file = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, tp_file), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
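def _tp_merge_demo():
    # A standalone sketch (not part of the original script) of the two tensor-parallel merge rules
    # used above: weights whose names are in WEIGHTS_TO_AVERAGE_ENDSWITH are replicated across TP
    # ranks and therefore averaged, while split linear weights are concatenated (dim 1 for
    # row-parallel layers, dim 0 for column-parallel ones).
    rank_shards = [torch.ones(2, 3), torch.ones(2, 3) * 3]
    averaged = sum(rank_shards) / len(rank_shards)  # replicated weight -> all 2.0, shape (2, 3)
    concatenated = torch.cat(rank_shards, dim=0)  # column-parallel weight -> shape (4, 3)
    return averaged, concatenated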
| 96
| 1
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    """Return `x` as a 2-tuple: iterables pass through, scalars are duplicated."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model_kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model_kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model_kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_pretrained_model_sharing(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 700
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the range of shard indices per job: early jobs get one extra shard when the split is uneven."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
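# Worked example: _distribute_shards(num_shards=5, max_num_jobs=2) hands the first job one extra
# shard because 5 % 2 == 1, giving [range(0, 3), range(3, 5)]; with max_num_jobs=10 the loop
# breaks after five singleton ranges, so no job ever receives an empty range.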
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into `max_num_jobs` gen_kwargs, sharding every list it contains."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Concatenate the sharded lists back together; non-list values are taken from the first shard."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs; lists of the same size share the same permutation."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
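def _sharding_demo():
    # A small usage sketch (not part of the original module): splitting gen_kwargs over jobs and
    # merging the per-job kwargs back preserves list contents and leaves scalar values alone.
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "batch_size": 8}
    splits = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
    # splits == [{"files": ["a.txt", "b.txt"], "batch_size": 8}, {"files": ["c.txt"], "batch_size": 8}]
    merged = _merge_gen_kwargs(splits)
    # merged == {"files": ["a.txt", "b.txt", "c.txt"], "batch_size": 8}
    return splits, merged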
| 620
| 0
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
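# A minimal usage sketch (the checkpoint name is only an example): the resolution order implemented
# above is (1) `feature_extractor_type` in the feature extractor config, (2) a remote `auto_map`
# entry when trust_remote_code allows it, (3) the model config's type via FEATURE_EXTRACTOR_MAPPING.
#
# from transformers import AutoFeatureExtractor
# feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")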
| 254
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__magic_name__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__magic_name__ = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"unc-nlp/lxmert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" LXMERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
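# Worked example (token ids are made up): with A = [5, 6] and B = [7],
# build_inputs_with_special_tokens gives [CLS] A [SEP] B [SEP], and
# create_token_type_ids_from_sequences marks segment A (plus [CLS]/[SEP]) with 0 and B with 1:
# [0, 0, 0, 0] + [1, 1] -> [0, 0, 0, 0, 1, 1]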
| 254
| 1
|
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers with least-significant-digit radix sort, in place."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
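# Example run (standard least-significant-digit radix sort on non-negative integers):
# radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) -> [2, 24, 45, 66, 75, 90, 170, 802]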
| 13
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 13
| 1
|
from __future__ import annotations
import os
from typing import Any
import requests
__magic_name__ = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__magic_name__ = BASE_URL + "/user"
# https://github.com/settings/tokens
__magic_name__ = os.environ.get("USER_TOKEN", "")
def _lowerCAmelCase ( A__: str ):
'''simple docstring'''
UpperCAmelCase = {
'''Authorization''': F"""token {auth_token}""",
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(A__ , headers=A__ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 254
|
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(x) with its Maclaurin series: x - x^3/3! + x^5/5! - ..."""
    # Simplify the angle to be between -360 and 360 degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("doctest").testmod()
| 254
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
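# A small usage sketch (values shown are the defaults restored above):
# from transformers import DPRConfig, DPRQuestionEncoder
# config = DPRConfig(projection_dim=0)  # 0 means "no extra projection, embeddings use hidden_size"
# model = DPRQuestionEncoder(config)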
| 719
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 366
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = '''▁'''
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
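# Layout sketch for the helpers above (BARThez follows the RoBERTa convention): a single sequence
# becomes `<s> A </s>` and a pair becomes `<s> A </s></s> B </s>`, and since the model does not
# use segment embeddings, create_token_type_ids_from_sequences returns all zeros in both cases.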
| 422
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between two prompt lines, skipping surrounding blank lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into its component words."""
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    # ✅ and ❌ render two cells wide, so count them as length 2 for alignment purposes
    text_length = 2 if text == """✅""" or text == """❌""" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    # Dictionary mapping model names to configs.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("""Config""", """""") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("""Tokenizer"""):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("""TokenizerFast"""):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = """|""" + """|""".join([_center_text(c, w) for c, w in zip(columns, widths)]) + """|\n"""
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths]) + "|\n"

    check = {True: """✅""", False: """❌"""}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the state of the lib, and maybe overwrite it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, """index.md"""),
        start_prompt="""<!--This table is updated automatically from the auto modules""",
        end_prompt="""<!-- End table-->""",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, """index.md"""), """w""", encoding="""utf-8""", newline="""\n""") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__snake_case = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 200
| 0
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester :
"""simple docstring"""
def __init__( self : Optional[Any] , a_ : Any , a_ : Union[str, Any]=13 , a_ : str=32 , a_ : Any=3 , a_ : Dict=4 , a_ : Union[str, Any]=[10, 20, 30, 40] , a_ : str=[2, 2, 3, 2] , a_ : Dict=True , a_ : List[Any]=True , a_ : Tuple=37 , a_ : Optional[int]="gelu" , a_ : Tuple=10 , a_ : List[str]=0.0_2 , a_ : Optional[int]=["stage2", "stage3", "stage4"] , a_ : str=[2, 3, 4] , a_ : List[Any]=None , ):
"""simple docstring"""
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = num_stages
lowerCamelCase__ = hidden_sizes
lowerCamelCase__ = depths
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = num_labels
lowerCamelCase__ = initializer_range
lowerCamelCase__ = out_features
lowerCamelCase__ = out_indices
lowerCamelCase__ = scope
def _UpperCamelCase ( self : int ):
"""simple docstring"""
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _UpperCamelCase ( self : List[str] , a_ : Union[str, Any] , a_ : Tuple , a_ : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ = ConvNextModel(config=a_ )
model.to(a_ )
model.eval()
lowerCamelCase__ = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _UpperCamelCase ( self : str , a_ : Dict , a_ : int , a_ : List[str] ):
"""simple docstring"""
lowerCamelCase__ = ConvNextForImageClassification(a_ )
model.to(a_ )
model.eval()
lowerCamelCase__ = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : List[Any] , a_ : str , a_ : List[Any] , a_ : Tuple ):
"""simple docstring"""
lowerCamelCase__ = ConvNextBackbone(config=a_ )
model.to(a_ )
model.eval()
lowerCamelCase__ = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase__ = None
lowerCamelCase__ = ConvNextBackbone(config=a_ )
model.to(a_ )
model.eval()
lowerCamelCase__ = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37 )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(a_ )
lowerCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a_ )
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a_ )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
def check_hidden_states_output(a_ : Dict , a_ : Optional[int] , a_ : List[Any] ):
lowerCamelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(a_ , a_ ) )
lowerCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ = self.model_tester.num_stages
self.assertEqual(len(a_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ = True
check_hidden_states_output(a_ , a_ , a_ )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = ConvNextModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def snake_case ():
'''simple docstring'''
lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(a_ )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=a_ , return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**a_ )
# verify the logits
lowerCamelCase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , a_ )
lowerCamelCase__ = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
@require_torch
class ConvNextBackboneTest ( BackboneTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (ConvNextBackbone,) if is_torch_available() else ()
snake_case_ = ConvNextConfig
snake_case_ = False
def _UpperCamelCase ( self : int ):
"""simple docstring"""
lowerCamelCase__ = ConvNextModelTester(self )
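# Illustrative sketch (not part of the test suite above): ConvNext downsamples by 4x in
# the patchify stem and by 2x in each of the three later stages, so the final feature
# map is image_size // 32 per side -- exactly what create_and_check_model asserts.
def convnext_final_spatial_size(image_size: int ) -> int:
    size = image_size // 4  # stride-4 stem convolution
    for _ in range(3 ):  # stages 2-4 each halve the resolution
        size //= 2
    return size
assert convnext_final_spatial_size(224 ) == 224 // 32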
| 235
|
from ...processing_utils import ProcessorMixin
class WhisperProcessor ( ProcessorMixin ):
    """simple docstring"""
    feature_extractor_class = 'WhisperFeatureExtractor'
    tokenizer_class = 'WhisperTokenizer'
    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        """simple docstring"""
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    def get_prompt_ids( self , text , return_tensors="np" ):
        """simple docstring"""
        return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
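# A minimal usage sketch (the checkpoint name and variable names are illustrative,
# not taken from this file):
# processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
# inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
# When `text=` is passed alongside `audio=`, the tokenizer output is attached as
# inputs["labels"], matching the branch at the end of __call__ above.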
| 235
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A = {
"yjernite/retribert-base-uncased": 5_1_2,
}
__A = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast (PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
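# Worked example of the token-type layout built above: for a pair (A, B) the inputs are
# [CLS] A [SEP] B [SEP], with 0s over "[CLS] A [SEP]" and 1s over "B [SEP]".
def token_type_ids_sketch(len_a , len_b=None ):
    if len_b is None:
        return [0] * (1 + len_a + 1)
    return [0] * (1 + len_a + 1) + [1] * (len_b + 1)
assert token_type_ids_sketch(3 , 2 ) == [0, 0, 0, 0, 0, 1, 1, 1]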
| 586
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node :
    """simple docstring"""
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root: Node | None ) -> list[int]:
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None ) -> list[int]:
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None ) -> list[int]:
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None ) -> int:
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None ) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root: Node | None , level: int ) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None , level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left(root: Node | None , level: int ) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None , level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag(root: Node | None ) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(F"""In-order Traversal: {inorder(tree )}""" )
    print(F"""Pre-order Traversal: {preorder(tree )}""" )
    print(F"""Post-order Traversal: {postorder(tree )}""" , '''\n''' )
    print(F"""Height of Tree: {height(tree )}""" , '''\n''' )
    print('''Complete Level Order Traversal: ''' )
    print(level_order(tree ) , '''\n''' )
    print('''Level-wise order Traversal: ''' )
    for level in range(1 , height(tree ) + 1 ):
        print(F"""Level {level}:""" , get_nodes_from_left_to_right(tree , level=level ) )
    print('''\nZigZag order Traversal: ''' )
    print(zigzag(tree ) )
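# Small smoke test added for illustration, using the example tree 1 -> (2 -> (4, 5), 3):
def _traversal_smoke_test() -> None:
    tree = make_tree()
    assert preorder(tree ) == [1, 2, 4, 5, 3]
    assert inorder(tree ) == [4, 2, 5, 1, 3]
    assert postorder(tree ) == [4, 5, 2, 3, 1]
    assert level_order(tree ) == [1, 2, 3, 4, 5]
    assert zigzag(tree ) == [[1], [3, 2], [4, 5]]
_traversal_smoke_test()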
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 586
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node :
    def __init__( self , data: Any ) -> None:
        """simple docstring"""
        self.data = data
        self.next: Node | None = None
class CircularLinkedList :
    def __init__( self ) -> None:
        """simple docstring"""
        self.head: Node | None = None
        self.tail: Node | None = None
    def __iter__( self ) -> Iterator[Any]:
        """simple docstring"""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__( self ) -> int:
        """simple docstring"""
        return sum(1 for _ in self )
    def __repr__( self ) -> str:
        """simple docstring"""
        return "->".join(str(item ) for item in iter(self ) )
    def insert_tail( self , data: Any ) -> None:
        """simple docstring"""
        self.insert_nth(len(self ) , data )
    def insert_head( self , data: Any ) -> None:
        """simple docstring"""
        self.insert_nth(0 , data )
    def insert_nth( self , index: int , data: Any ) -> None:
        """simple docstring"""
        if index < 0 or index > len(self ):
            raise IndexError('''list index out of range.''' )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1:  # insert at tail
                self.tail = new_node
    def delete_front( self ) -> Any:
        """simple docstring"""
        return self.delete_nth(0 )
    def delete_tail( self ) -> Any:
        """simple docstring"""
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index: int = 0 ) -> Any:
        """simple docstring"""
        if not 0 <= index < len(self ):
            raise IndexError('''list index out of range.''' )
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty( self ) -> bool:
        """simple docstring"""
        return len(self ) == 0
def test_circular_linked_list() -> None:
    '''simple docstring'''
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
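# Illustrative usage note: insert_nth/delete_nth walk from the head, so they are O(n);
# insert_head/insert_tail/delete_front/delete_tail are thin wrappers around them.
def _circular_list_demo() -> str:
    cll = CircularLinkedList()
    for value in (1, 2, 3):
        cll.insert_tail(value )
    cll.insert_head(0 )
    return repr(cll )
assert _circular_list_demo() == "0->1->2->3"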
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ):
    """simple docstring"""
    parameter_file = os.path.join(args.tf_model_dir , '''parameters.json''' )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
    if not args.output.endswith('''.pt''' ):
        args.output = args.output + '''.pt'''
    new_state = OrderedDict()
    with tf.device('''/CPU:0''' ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowerCamelCase__ : Any = reader.get_tensor(_lowercase ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowerCamelCase__ : Tuple = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowerCamelCase__ : Union[str, Any] = 8
lowerCamelCase__ : Optional[Any] = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowerCamelCase__ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : List[str] = torch.tensor(_lowercase )
elif key_name.startswith('''model/moe''' ):
lowerCamelCase__ : Dict = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowerCamelCase__ : Any = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowerCamelCase__ : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith('''/softmlp/kernel''' ):
lowerCamelCase__ : Optional[Any] = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowerCamelCase__ : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowerCamelCase__ : Optional[int] = key_name[-9:-7]
for i in range(16 ):
lowerCamelCase__ : str = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowerCamelCase__ : Union[str, Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith('''model/mlp''' ):
lowerCamelCase__ : Dict = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowerCamelCase__ : Dict = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowerCamelCase__ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Tuple = torch.tensor(_lowercase )
elif key_name.endswith('''/p1/bias''' ):
lowerCamelCase__ : Union[str, Any] = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowerCamelCase__ : Dict = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Union[str, Any] = torch.tensor(_lowercase )
elif key_name.endswith('''/p2/kernel''' ):
lowerCamelCase__ : Tuple = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowerCamelCase__ : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Any = torch.tensor(_lowercase )
elif key_name.endswith('''/p2/bias''' ):
lowerCamelCase__ : int = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowerCamelCase__ : Dict = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : List[Any] = torch.tensor(_lowercase )
elif key_name.startswith('''model/ln''' ):
lowerCamelCase__ : Tuple = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCamelCase__ : Union[str, Any] = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowerCamelCase__ : Tuple = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : int = torch.tensor(_lowercase )
elif key_name.endswith('''/g''' ):
lowerCamelCase__ : Tuple = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowerCamelCase__ : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : List[str] = torch.tensor(_lowercase )
elif key_name.startswith('''model/att''' ):
lowerCamelCase__ : str = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowerCamelCase__ : List[Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCamelCase__ : Optional[Any] = state[:, 0, :, :]
lowerCamelCase__ : int = state[:, 1, :, :]
lowerCamelCase__ : Optional[int] = state[:, 2, :, :]
lowerCamelCase__ : str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : str = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Tuple = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Optional[Any] = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
lowerCamelCase__ : str = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
lowerCamelCase__ : Union[str, Any] = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowerCamelCase__ : int = torch.tensor(_lowercase )
elif key_name.endswith('''/o/kernel''' ):
lowerCamelCase__ : List[str] = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowerCamelCase__ : Any = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Any = torch.tensor(_lowercase )
elif key_name.startswith('''model/an''' ):
lowerCamelCase__ : str = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCamelCase__ : int = '''model.blocks.%d.self_attn.norm.bias''' % player
lowerCamelCase__ : Tuple = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith('''/g''' ):
lowerCamelCase__ : int = '''model.blocks.%d.self_attn.norm.weight''' % player
lowerCamelCase__ : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Any = torch.tensor(_lowercase )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowerCamelCase__ : Optional[int] = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowerCamelCase__ : List[Any] = '''model.%s.weight''' % nlayer
lowerCamelCase__ : List[Any] = vnp.copy() # same in embedded
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
if key_name.startswith('''model/wte''' ):
lowerCamelCase__ : str = '''lm_head.weight'''
lowerCamelCase__ : Dict = vnp.copy() # same in embedded
lowerCamelCase__ : List[str] = torch.tensor(_lowercase )
elif key_name.startswith('''model/wob''' ):
lowerCamelCase__ : List[Any] = '''final_logits_bias'''
lowerCamelCase__ : List[str] = vnp.copy() # same in embedded
lowerCamelCase__ : int = state.reshape((1, -1) )
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
lowerCamelCase__ : List[Any] = '''model.last_project.weight'''
lowerCamelCase__ : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
lowerCamelCase__ : Dict = '''model.last_project.bias'''
lowerCamelCase__ : Tuple = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
    torch.save(new_state , args.output )
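# Illustrative sketch of the qkv split used above: a checkpoint tensor of shape
# (hidden, 3, heads, head_dim) is sliced into q/k/v, each flattened to
# (hidden, heads * head_dim) and transposed into the PyTorch Linear weight layout.
# The function name is an assumption for illustration, not part of the converter.
def split_qkv_sketch(qkv ):
    def to_linear(t ):
        return t.reshape([t.shape[0], t.shape[1] * t.shape[2]] ).transpose([1, 0] ).copy()
    return to_linear(qkv[:, 0, :, :] ), to_linear(qkv[:, 1, :, :] ), to_linear(qkv[:, 2, :, :] )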
if __name__ == "__main__":
UpperCAmelCase : Tuple = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
UpperCAmelCase : Any = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 121
| 0
|
'''simple docstring'''
import math
class Graph :
    '''simple docstring'''
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ):
        self.dp[u][v] = w
    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
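    # Worked example: the shortest 1 -> 4 path above is 1 -> 3 -> 4 (5 + 6 = 11) and the
    # shortest 0 -> 3 path is 0 -> 2 -> 3 (9 + 7 = 16), so after floyd_warshall():
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16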
| 244
|
'''simple docstring'''
def set_bit(number: int , position: int ) -> int:
    """simple docstring"""
    return number | (1 << position)
def clear_bit(number: int , position: int ) -> int:
    """simple docstring"""
    return number & ~(1 << position)
def flip_bit(number: int , position: int ) -> int:
    """simple docstring"""
    return number ^ (1 << position)
def is_bit_set(number: int , position: int ) -> bool:
    """simple docstring"""
    return ((number >> position) & 1) == 1
def get_bit(number: int , position: int ) -> int:
    """simple docstring"""
    return int((number & (1 << position)) != 0 )
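# Worked examples on 0b1010 (decimal 10), added for illustration:
assert set_bit(0b1010 , 0 ) == 0b1011  # 11
assert clear_bit(0b1010 , 1 ) == 0b1000  # 8
assert flip_bit(0b1010 , 1 ) == 0b1000  # 8
assert is_bit_set(0b1010 , 3 ) is True
assert get_bit(0b1010 , 0 ) == 0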
if __name__ == "__main__":
import doctest
doctest.testmod()
| 244
| 1
|
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609_344,
    "knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277_777_778,
    "mph": 0.621_371_192,
    "knot": 0.539_956_803,
}
def convert_speed(speed: float , unit_from: str , unit_to: str ) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            F'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
            F'Valid values are: {", ".join(speed_chart_inverse )}'
        )
        raise ValueError(msg )
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
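# Worked example: 100 km/h -> mph is 100 * 1.0 * 0.621_371_192, rounded to 62.137.
assert convert_speed(100 , "km/h" , "mph" ) == 62.137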
if __name__ == "__main__":
import doctest
doctest.testmod()
| 457
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__lowerCamelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__lowerCamelCase : Optional[Any] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class __magic_name__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
UpperCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(UpperCamelCase__ , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = "src/transformers"
shutil.rmtree(self.transformer_dir )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=None ) -> str:
'''simple docstring'''
UpperCAmelCase = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
UpperCAmelCase = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
UpperCAmelCase = black.format_str(UpperCamelCase__ , mode=UpperCamelCase__ )
UpperCAmelCase = os.path.join(self.transformer_dir , "new_code.py" )
with open(UpperCamelCase__ , "w" , newline="\n" ) as f:
f.write(UpperCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCamelCase__ )
with open(UpperCamelCase__ , "r" ) as f:
self.assertTrue(f.read() , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , UpperCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , UpperCamelCase__ ) , )
# Copy consistency with a really long name
UpperCAmelCase = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub("Bert" , UpperCamelCase__ , UpperCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , UpperCamelCase__ , overwrite_result=re.sub("Bert" , "TestModel" , UpperCamelCase__ ) , )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
UpperCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
UpperCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
UpperCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
UpperCAmelCase , UpperCAmelCase = check_copies.convert_to_localized_md(
UpperCamelCase__ , UpperCamelCase__ , localized_readme["format_model_list"] )
self.assertFalse(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase , UpperCAmelCase = check_copies.convert_to_localized_md(
UpperCamelCase__ , UpperCamelCase__ , localized_readme["format_model_list"] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(UpperCamelCase__ )
UpperCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
UpperCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
UpperCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
UpperCAmelCase , UpperCAmelCase = check_copies.convert_to_localized_md(
UpperCamelCase__ , UpperCamelCase__ , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
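# Illustrative sketch (an assumed helper, not the real check_copies implementation):
# the "Copied from ... with Old->New" mechanism rewrites names before diffing bodies.
def apply_copy_rename_sketch(code: str , pattern: str ) -> str:
    old , new = pattern.split("->" )
    return re.sub(old , new , code )
assert apply_copy_rename_sketch("class BertHead:" , "Bert->TestModel" ) == "class TestModelHead:"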
| 457
| 1
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
_lowerCamelCase = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode('''UTF-8''' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(''' ''' ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '''\n'''.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f': {x}: ' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
    return selected_warnings
def extract_warnings( artifact_dir , targets ):
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('''.zip''' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str( values ):
        return values.split(''',''' )
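    # Illustrative matcher mirroring the `targets` filter inside parse_line above:
    def warning_matches_targets_sketch(warning , targets ):
        return any(f': {x}: ' in warning for x in targets )
    assert warning_matches_targets_sketch('foo.py:10: DeprecationWarning: x is deprecated' , ['DeprecationWarning'] )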
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
_lowerCamelCase = parser.parse_args()
_lowerCamelCase = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
_lowerCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
_lowerCamelCase = extract_warnings(args.output_dir, args.targets)
_lowerCamelCase = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 144
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class A__ ( PretrainedConfig ):
    model_type = "fnet"
    def __init__( self , vocab_size=32000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
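# Minimal usage sketch (illustrative; the public class is normally named FNetConfig):
# config = A__()
# assert config.hidden_size == 768 and config.use_tpu_fourier_optimizations is False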
| 154
| 0
|
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    '''simple docstring'''
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    '''simple docstring'''
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search(sorted_collection: list[int] , item: int ) -> int | None:
    '''simple docstring'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int] , item: int ) -> int | None:
    '''simple docstring'''
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int] , item: int , left: int , right: int ) -> int | None:
    '''simple docstring'''
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(F'''{target} was not found in {collection}.''')
    else:
        print(F'''{target} was found at position {result} in {collection}.''')
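# Smoke checks added for illustration:
assert bisect_left([0, 5, 7, 10, 15] , 6 ) == 2
assert bisect_right([0, 5, 7, 7, 15] , 7 ) == 4
assert binary_search([0, 5, 7, 10, 15] , 10 ) == 3
assert binary_search_by_recursion([0, 5, 7, 10, 15] , 15 , 0 , 4 ) == 4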
| 13
|
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check( pkg , hint=None ):
    '''simple docstring'''
    require_version(deps[pkg] , hint )
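# Usage sketch: dep_version_check("tqdm") looks up the pinned range in `deps`
# (e.g. a spec like "tqdm>=4.27") and delegates to require_version, which raises
# when the installed version falls outside that range.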
| 13
| 1
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """simple docstring"""
    parser = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
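# Example invocation (shell): `transformers-cli env` parses the `env` subcommand,
# whose register_subcommand set a `func` default, so `args.func(args)` builds and
# runs the corresponding command object (here, EnvironmentCommand).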
if __name__ == "__main__":
main()
| 333
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None, range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences of 25 tokens each.
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 333
| 1
|
"""simple docstring"""
import os
def largest_product(grid: list[list[int]]) -> int:
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest
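# Quick sanity check (hypothetical input, not from the bundled grid.txt): for a
# 4x4 grid of all ones, every line of four adjacent cells multiplies to 1, so
# largest_product([[1] * 4 for _ in range(4)]) == 1.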
def solution() -> int:
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 659
|
"""simple docstring"""
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations(_a: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a: int , _a: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase__ = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowerCamelCase__ = answer
return answer
lowerCamelCase__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (target + 1)
lowerCamelCase__ = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
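# Worked example for the driver below (array=[1, 2, 5], target=5): the ordered
# ways are 1+1+1+1+1, 1+1+1+2, 1+1+2+1, 1+2+1+1, 2+1+1+1, 1+2+2, 2+1+2, 2+2+1
# and 5 - nine in total, so all three implementations return 9.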
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 659
| 1
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    # The two methods below originally shared a name, so the second silently
    # shadowed the first; they also need the test_ prefix for unittest discovery.
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 411
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
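# Minimal standalone sketch of the same API (the tiny public checkpoint keeps
# the run fast; the values mirror the tests above and are not a recommended
# benchmarking configuration):
#
#     args = PyTorchBenchmarkArguments(models=["sshleifer/tiny-gpt2"], training=False,
#                                      inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
#     results = PyTorchBenchmark(args).run()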
| 289
| 0
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False
def is_chinese(word: str):
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: list):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
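# Example of the whole-word marking above (hypothetical inputs): with
# bert_tokens = ["中", "国", "人"] and chinese_word_set = {"中国"}, the longest
# match "中国" starting at position 0 rewrites the list to ["中", "##国", "人"],
# so whole-word masking can later treat "中国" as a single unit.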
def prepare_ref(lines: list, ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
| 702
|
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Store an optional default key used when none is supplied per call."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR-encrypt `content` character by character, returning a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """XOR is self-inverse, so decryption applies the same transformation."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
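# Round-trip property (key 67 is an arbitrary example): XOR with the same key is
# its own inverse, so
#     XORCipher().decrypt_string(XORCipher().encrypt_string("hallo welt", 67), 67)
# returns "hallo welt" unchanged.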
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
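# The _LazyModule indirection defers the heavy torch-dependent imports until an
# attribute is first accessed, e.g. `from transformers.models.x_clip import
# XCLIPConfig` only loads configuration_x_clip at that point.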
| 269
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
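# Usage sketch (the checkpoint name is one public BLIP model, used here only as
# an example):
#
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     # inputs now holds pixel_values plus the tokenized text fields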
| 269
| 1
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    r"""
    The sample tree used below:
          1
         / \
        2   3
       / \
      4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate left-to-right and right-to-left per level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))
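# For the sample tree from make_tree() the expected outputs are:
#   preorder  -> [1, 2, 4, 5, 3]
#   inorder   -> [4, 2, 5, 1, 3]
#   postorder -> [4, 5, 2, 3, 1]
#   zigzag    -> [[1], [3, 2], [4, 5]]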
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 702
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
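# Note on the slicing above: timm stores attention as one fused qkv projection of
# shape (3 * hidden_size, hidden_size); the first, middle and last hidden_size
# rows are the query, key and value weights respectively.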
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 142
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29
|
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of `data` around `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at `index` if `items` were sorted."""
    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
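# Example: quick_select([2, 4, 5, 7, 899, 54, 32], 5) returns 54, the element at
# index 5 of the sorted list [2, 4, 5, 7, 32, 54, 899]; expected runtime is O(n).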
| 670
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class A__ ( lowercase_ ):
"""simple docstring"""
__A : List[Any] = ['''input_features''']
def __init__( self , lowercase=80 , lowercase=1_6000 , lowercase=160 , lowercase=30 , lowercase=400 , lowercase=0.0 , lowercase=False , **lowercase , ) -> int:
'''simple docstring'''
super().__init__(
feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , return_attention_mask=lowercase , **lowercase , )
a__ : Optional[int] = n_fft
a__ : Dict = hop_length
a__ : Any = chunk_length
a__ : List[Any] = chunk_length * sampling_rate
a__ : Optional[int] = self.n_samples // hop_length
a__ : Dict = sampling_rate
a__ : Any = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowercase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=lowercase , norm='slaney' , mel_scale='slaney' , )
def __lowercase ( self , lowercase) -> str:
'''simple docstring'''
a__ : Tuple = spectrogram(
lowercase , window_function(self.n_fft , 'hann') , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
a__ : str = log_spec[:, :-1]
a__ : Optional[int] = np.maximum(lowercase , log_spec.max() - 8.0)
a__ : int = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __lowercase ( lowercase , lowercase , lowercase = 0.0) -> Dict:
'''simple docstring'''
if attention_mask is not None:
a__ : List[Any] = np.array(lowercase , np.intaa)
a__ : str = []
for vector, length in zip(lowercase , attention_mask.sum(-1)):
a__ : Union[str, Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
if length < normed_slice.shape[0]:
a__ : List[Any] = padding_value
normed_input_values.append(lowercase)
else:
a__ : Tuple = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
return normed_input_values
def __call__( self , lowercase , lowercase = True , lowercase = None , lowercase = None , lowercase = None , lowercase = "max_length" , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> Tuple:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({'input_features': raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize, )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['input_features'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_features'], attention_mask=padded_inputs['attention_mask'], padding_value=self.padding_value, )
            padded_inputs['input_features'] = np.stack(padded_inputs['input_features'], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get('input_features').transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs['input_features'] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['attention_mask'] = padded_inputs['attention_mask'][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
return padded_inputs
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
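# A minimal, self-contained check of the zero-mean/unit-variance normalization
# used above, (x - mean) / sqrt(var + 1e-7), applied slice-by-slice up to each
# sequence's real length. The toy batch and lengths are invented for illustration.
if __name__ == "__main__":
    import numpy as np

    toy_batch = np.array([[1.0, 2.0, 3.0, 0.0, 0.0], [4.0, 6.0, 0.0, 0.0, 0.0]])
    toy_lengths = [3, 2]
    for vector, length in zip(toy_batch, toy_lengths):
        real = vector[:length]
        normed = (real - real.mean()) / np.sqrt(real.var() + 1e-7)
        print(normed.mean(), normed.var())  # ~0.0 and ~1.0 for each unpadded slice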
| 720
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(search_prob, find_max=True, max_x=math.inf, min_x=-math.inf, max_y=math.inf, min_y=-math.inf, visualization=False, start_temperate=100, rate_of_decrease=0.01, threshold_temp=1) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )
    def test_f2(x, y):
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
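# A small, standalone illustration of the Metropolis acceptance rule used above: a
# worsening move (change < 0) is accepted with probability e^(change / T), so bad
# moves become rarer as the temperature T decays. The numbers are illustrative.
if __name__ == "__main__":
    for temp in (100, 10, 1):
        change = -5.0  # a move that worsens the score by 5
        probability = math.e ** (change / temp)
        print(f"T={temp:>3}: accept a -5 move with p={probability:.3f}")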
| 392
| 0
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """simple docstring"""
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
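# A quick, self-contained demo of the greedy longest-match-first loop above. The
# toy vocabulary is invented for illustration and is not the real CPM-Ant vocab.
if __name__ == "__main__":
    toy_vocab = {"un": 0, "affable": 1, "able": 2}
    wp = WordpieceTokenizer(vocab=toy_vocab)
    print(wp.tokenize("unaffable"))  # ['un', 'affable'] -- longest prefix wins each step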
class CpmAntTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs, ):
        requires_backends(self, ['jieba'])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]
    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]
    @property
    def newline_id(self):
        return self.encoder["\n"]
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        return token in self.encoder
    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder['\n']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        ' Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 23
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(), )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    """simple docstring"""
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size):
    """simple docstring"""
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
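# A short, made-up example of the box rescaling above: scales_yx holds per-image
# (y, x) ratios between raw and resized image sizes, and _scale_box maps box
# coordinates back accordingly. The tensors below are illustrative only.
if __name__ == "__main__":
    boxes = torch.tensor([[10.0, 20.0, 30.0, 40.0]])  # x1, y1, x2, y2
    scales_yx = torch.tensor([[2.0, 0.5]])            # (y scale, x scale)
    print(_scale_box(boxes.clone(), scales_yx))       # x coords halved, y coords doubled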
| 310
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=10_000, encoder_layers=12, encoder_ffn_dim=2_048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6_000, max_target_positions=1_024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1_024, input_feat_per_channel=80, input_channels=1, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`.")
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 705
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data
    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(accelerator: Accelerator, dataset_size: int, batch_size: int, process_0_expected_batch_sizes: List[int], process_1_expected_batch_sizes: List[int], ):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1], )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2], )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1], )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1], )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator: Accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
| 138
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    """simple docstring"""
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    """simple docstring"""
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    """simple docstring"""
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
    """simple docstring"""
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):
    """simple docstring"""
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    """simple docstring"""
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
    """simple docstring"""
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _convert_yes_no_to_bool(value):
    """simple docstring"""
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """simple docstring"""
    def _format_usage(self, usage, actions, groups, prefix):
        """simple docstring"""
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 483
|
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
A_ : List[str] ="""https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
A_ : int =requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
A_ : Tuple =BeautifulSoup(res.text, """html.parser""")
A_ : Optional[Any] =list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F'''https://google.com{link.get('href')}''')
| 483
| 1
|
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(tokenizer_name, data_dir, max_source_length=1_024, max_target_length=1_024, consider_target=False, **kwargs):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='train', **kwargs)
    pad = tok.pad_token_id
    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn), desc=str(ds.len_file), )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad).sum(1).tolist()
            tgt_lens = batch['labels'].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens
    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='val', **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 703
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
| 397
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True, ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
# with apply_OCR = True
__SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor()
from datasets import load_dataset
__SCREAMING_SNAKE_CASE = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
__SCREAMING_SNAKE_CASE = Image.open(ds[0]["file"] ).convert("RGB" )
__SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__SCREAMING_SNAKE_CASE = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
__SCREAMING_SNAKE_CASE = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
__SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 682
|
"""simple docstring"""
def dodecahedron_surface_area(edge):
    '''simple docstring'''
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge):
    '''simple docstring'''
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
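# A quick numeric check of the two formulas above:
# surface area(2) = 3 * sqrt(25 + 10*sqrt(5)) * 2^2 ~= 82.58
# volume(2) = (15 + 7*sqrt(5)) / 4 * 2^3 ~= 61.30
if __name__ == "__main__":
    print(round(dodecahedron_surface_area(2), 2))  # 82.58
    print(round(dodecahedron_volume(2), 2))        # 61.3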
| 682
| 1
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run"""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 700
|
def binomial_coefficient(n, r):
    """Compute C(n, r) using a single rolling row of Pascal's triangle."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
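# The in-place update above is Pascal's rule, C(i, j) = C(i-1, j) + C(i-1, j-1),
# swept right-to-left so each slot still holds the previous row's value when read.
# A quick cross-check against the standard library (Python 3.8+):
import math
assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252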
| 520
| 0
|
def solution():
    """Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
| 63
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
    'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'encodec'
    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs, ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}')
        super().__init__(**kwargs)
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
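# Worked example of the derived properties above, using the default values; the
# numbers follow directly from the code, not from any extra configuration:
# hop_length = prod([8, 5, 4, 2]) = 320, frame_rate = ceil(24000 / 320) = 75,
# num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.
if __name__ == "__main__":
    cfg = EncodecConfig()
    print(cfg.frame_rate, cfg.num_quantizers)  # 75 32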
| 594
| 0
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 10
|
'''simple docstring'''
def method_a(boundary, steps):
    """Apply the composite trapezoidal rule on [boundary[0], boundary[1]] with `steps` subintervals."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ..., b - h."""
    x = a + h
    while x <= (b - h):
        yield x
        x = round(x + h, 10)  # round to avoid float drift skipping the last point
def f(x):  # enter your function here
    """simple docstring"""
    y = (x - 0) * (x - 0)
    return y
def main():
    """simple docstring"""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
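# Sanity check (illustrative): the exact integral of x^2 over [0, 1] is 1/3, and
# the composite trapezoidal rule's error shrinks roughly as O(h^2).
if __name__ == "__main__":
    for steps in (10.0, 100.0, 1000.0):
        approx = method_a([0.0, 1.0], steps)
        print(steps, abs(approx - 1 / 3))  # error drops ~100x per 10x more steps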
| 10
| 1
|
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
A__ : Optional[int] = """will be""" if year > datetime.now().year else """was"""
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
| 13
|
'''simple docstring'''
import argparse
JS_PATH = "docs/source/_static/js/custom.js"
def update_custom_js(version):
    with open(JS_PATH, encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion ='):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {'):
        index += 1
    # We go until the end
    while not lines[index].startswith('}'):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(JS_PATH, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
A__ : Any = parser.parse_args()
update_custom_js(args.version)
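
# Illustration with hypothetical file contents (the real custom.js is not part
# of this script): running update_custom_js("4.30.0") against a file containing
#
#     const stableVersion = "v4.29.0"
#     const versionMapping = {
#         "v4.29.0": "v4.29.0",
#     }
#
# rewrites the first line to v4.30.0 and appends '"v4.30.0": "v4.30.0",' just
# before the closing brace of versionMapping.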
| 13
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
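
# How the lazy pattern behaves (a usage sketch, not part of this __init__):
# importing the package stays cheap, and the torch-backed classes registered in
# _import_structure are only imported when one of their names is first accessed:
#
#     from transformers.models import clipseg
#     config = clipseg.CLIPSegConfig()   # triggers the configuration_clipseg import
#     model_cls = clipseg.CLIPSegModel   # triggers modeling_clipseg (requires torch)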
| 720
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
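
# Round-trip sketch of the reader/writer pair under test (uses an in-memory
# dataset; not one of the original test cases):
def _roundtrip_demo():
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    with io.BytesIO() as buffer:
        JsonDatasetWriter(ds, buffer, lines=True).write()
        buffer.seek(0)
        assert [json.loads(line) for line in buffer][0] == {"col_1": "a", "col_2": 1, "col_3": 1.0}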
| 351
| 0
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
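
# Hypothetical wiring sketch (model name and worker count are illustrative
# only; the real setup lives in the RAG/Ray example scripts):
#
#     import ray
#     ray.init()
#     workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#     retriever = RagRayDistributedRetriever.from_pretrained(
#         "facebook/rag-token-nq", actor_handles=workers
#     )
#     retriever.init_retrieval()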
| 25
|
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) for an FLRW universe with the given relative densities."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
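
    # Sanity sketch (not in the original demo): at z = 0 the density terms plus
    # curvature sum to exactly 1, so H(0) must equal the Hubble constant itself.
    assert abs(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
        - 68.3
    ) < 1e-9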
| 568
| 0
|
"""Pearson correlation coefficient metric."""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'

_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n        Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n        Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n        Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n        Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n        Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n        Kern, Robert and Larson, Eric and Carey, C J and\n        Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n        {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n        Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n        Harris, Charles R. and Archibald, Anne M. and\n        Ribeiro, Antonio H. and Pedregosa, Fabian and\n        {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n        Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # pearsonr is symmetric in its two arguments, so the argument order does not affect the coefficient
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 720
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify the converted model on an example image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
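
# Examples of the key renaming above (a sketch; input keys follow the SimMIM
# checkpoint layout this script expects):
#
#     rename_key("encoder.patch_embed.proj.weight")
#     # -> "swin.embeddings.patch_embeddings.projection.weight"
#     rename_key("encoder.layers.0.blocks.0.norm1.weight")
#     # -> "swin.encoder.layers.0.blocks.0.layernorm_before.weight"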
| 576
| 0
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find instances where a non-binary file is opened without an explicit encoding."""
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Find print statements that are neither commented out nor inside a docstring."""
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            filtered_matches = [match for match in matches if match is not None and match.group(1) is not None]
        return filtered_matches[0] if filtered_matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.')
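
# Behavior sketch for the encoding regex above (illustrative strings only): it
# flags `open(...)` calls that carry neither an encoding kwarg nor a binary mode.
#
#     pattern = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
#     pattern.search(' open("data.txt")\n')                    # match -> flagged
#     pattern.search(' open("data.txt", encoding="utf-8")\n')  # no match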
| 282
|
"""A collection of small utilities used throughout Accelerate."""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, 'forward')
        original_forward = model.__dict__.pop('_original_forward', None)
        if original_forward is not None:
            while hasattr(forward, '__wrapped__'):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, '_converted_to_transformer_engine', False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it first."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk; only the main process (or the XLA runtime on TPU) writes."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set each keyword argument in `os.environ` (upper-cased), removing it on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a human-readable name for an object, class, or function."""
    if not hasattr(obj, '__qualname__') and not hasattr(obj, '__name__'):
        obj = getattr(obj, '__class__', obj)
    if hasattr(obj, '__qualname__'):
        return obj.__qualname__
    if hasattr(obj, '__name__'):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge the nested dictionary `source` into `destination` and return `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None) -> bool:
    """Check whether a port on localhost is already in use (e.g. by another `accelerate launch`)."""
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('localhost', port)) == 0
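
# Usage sketch for two helpers above (assumes MASTER_PORT is not already set in
# the environment; this demo function is not part of the original module):
def _utils_demo():
    with patch_environment(master_port='29501'):
        assert os.environ['MASTER_PORT'] == '29501'
    assert 'MASTER_PORT' not in os.environ
    assert merge_dicts({'a': {'b': 1}}, {'a': {'c': 2}}) == {'a': {'b': 1, 'c': 2}}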
| 596
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]

    def __init__(self, **kwargs):
        # map deprecated `no_*` flags onto their positive counterparts
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop('''tpu_name''', self.tpu_name)
        self.device_idx = kwargs.pop('''device_idx''', self.device_idx)
        self.eager_mode = kwargs.pop('''eager_mode''', self.eager_mode)
        self.use_xla = kwargs.pop('''use_xla''', self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None, metadata={'help': 'Name of TPU'}, )
    device_idx: int = field(
        default=0, metadata={'help': 'CPU / GPU device index. Defaults to 0.'}, )
    eager_mode: bool = field(default=False, metadata={'help': 'Benchmark models in eager mode.'})
    use_xla: bool = field(
        default=False, metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`.'
        }, )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ['''tf'''] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ['''tf'''] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], '''GPU''' )
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
            else:
                tf.config.set_visible_devices([], '''GPU''' )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
        return strategy

    @property
    def is_tpu(self):
        requires_backends(self, ['''tf'''] )
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ['''tf'''] )
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ['''tf'''] )
        return tf.config.list_physical_devices('''GPU''' )

    @property
    def n_gpu(self):
        requires_backends(self, ['''tf'''] )
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
|
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, '''r''', encoding='''utf-8''' ) as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode('''utf-8''')
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value('''zero_optimization.stage''', -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(['''cpu''', '''nvme'''] )
            offload_devices = set(
                [
                    self.get_value('''zero_optimization.offload_optimizer.device''' ),
                    self.get_value('''zero_optimization.offload_param.device''' ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split('''.''' )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split('''.''' )
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, '''overflow''' )

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 1
|
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True, )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['''hidden_states'''][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['''hidden_states'''][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': FalconModel,
            '''text-classification''': FalconForSequenceClassification,
            '''text-generation''': FalconForCausalLM,
            '''question-answering''': FalconForQuestionAnswering,
            '''token-classification''': FalconForTokenClassification,
            '''zero-shot''': FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_with_alibi(self):
        config, *config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *config_and_inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''single_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_cache_conversions(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict['''input_ids''']
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, '''use_cache'''):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs['''use_cache'''] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, '''decoder_layers''', None)
                or getattr(config, '''num_decoder_layers''', None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, '''num_kv_heads''', config.num_attention_heads)
            embed_dim = getattr(config, '''d_model''', config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs['''past_key_values''']
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs['''input_ids'''].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''')
        model = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''')
        model.eval()
        model.to(torch_device)
        inputs = tokenizer('''My favorite food is''', return_tensors='''pt''').to(torch_device)

        EXPECTED_OUTPUT = (
            '''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer('''My favorite food is''', return_tensors='''pt''').to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer('''My favorite food is''', return_tensors='''pt''').to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 132
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
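# A minimal usage sketch for the pruning-aware config above; the values passed here are
# illustrative, not taken from any released checkpoint.
if __name__ == "__main__":
    config = MaskedBertConfig(num_hidden_layers=6, pruning_method="topK", mask_init="constant")
    print(config.pruning_method, config.mask_scale)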
| 132
| 1
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
        eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
        mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
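# A minimal padding sketch for the `_pad` override above, assuming the
# `allenai/led-base-16384` checkpoint referenced in the maps is reachable:
if __name__ == "__main__":
    tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    batch = tokenizer(["short text", "a somewhat longer piece of text"])
    # Global attention on the first token of each sequence
    batch["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in batch["input_ids"]]
    padded = tokenizer.pad(batch, padding="longest")
    # The shorter row of global_attention_mask is right-padded with -1, not 0
    print(padded["global_attention_mask"])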
| 261
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None,
        enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910,
        shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 261
| 1
|
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
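# A minimal forward-pass sketch for the blocks above; inputs are NHWC as used throughout
# this module, and all shapes below are illustrative:
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    hidden_states = jnp.zeros((1, 16, 16, 32))  # (batch, height, width, channels)
    temb = jnp.zeros((1, 128))  # time embedding
    variables = block.init(rng, hidden_states, temb)
    out = block.apply(variables, hidden_states, temb)
    print(out.shape)  # expected: (1, 16, 16, 64)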
| 665
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
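# A minimal sketch of the lazy-import behavior this sets up, assuming a standard
# transformers installation: importing the package is cheap, and the heavy torch/TF
# submodules only load on first attribute access.
if __name__ == "__main__":
    import importlib

    deberta = importlib.import_module("transformers.models.deberta")
    config = deberta.DebertaConfig()  # first access triggers loading configuration_deberta
    print(type(config).__name__)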
| 665
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709
|
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
| 619
| 0
|
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig(object):
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
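# A minimal sketch of how this wrapper is used: it copies an existing transformer config's
# attributes and adds the modal ones. BertConfig here is an assumption based on the
# original MMBT examples, not something this file mandates.
if __name__ == "__main__":
    from transformers import BertConfig

    config = MMBTConfig(BertConfig(), num_labels=2)
    print(config.num_labels, config.modal_hidden_size)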
| 13
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
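    # Worked check for n = 10: the square of the sum is 55**2 = 3025, the sum of the
    # squares is 385, and the difference is 3025 - 385 = 2640.
    assert solution(10) == 2640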
| 13
| 1
|
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main():
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
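    # Round-trip sanity check: decrypting an encrypted message with the same key
    # recovers the original text, including case, spaces, and punctuation.
    assert decrypt_message("LION", encrypt_message("LION", "Attack at dawn")) == "Attack at dawn"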
| 350
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350
| 1
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 521
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via"
                " `num_attention_heads` because of a naming issue as described in"
                " https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing"
                " `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards
        # breaking which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ):
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
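# A toy initialization sketch, assuming a deliberately tiny configuration; all sizes here
# are illustrative and far smaller than any released checkpoint:
if __name__ == "__main__":
    unet = FlaxUNet2DConditionModel(
        sample_size=8,
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
        up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
        block_out_channels=(32, 64),
        layers_per_block=1,
        cross_attention_dim=32,
    )
    params = unet.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, 4, 8, 8))  # (batch, channels, height, width)
    timesteps = jnp.ones((1,), dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 1, 32))
    out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
    print(out.sample.shape)  # expected: (1, 4, 8, 8)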
| 521
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 555
|
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
        bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
            bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
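# A usage sketch for `truncate_before_pattern`, assuming network access to the checkpoint
# referenced in the maps above; the prompt and extra patterns are illustrative:
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    ids = tok("def hello():\n    print('hi')\n\ndef bye():\n    pass\n")["input_ids"]
    # Decoding stops at the second top-level `def` and at any of the extra patterns.
    print(tok.decode(ids, truncate_before_pattern=[r"^#", r"^'''", "\n\n\n"]))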
| 555
| 1
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
| 46
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    """Configuration class for the quantization-aware I-BERT model."""

    model_type = "ibert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
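# --- usage sketch (added for illustration; not part of the original module) ---
# Assumes the classes above are in scope; prints a couple of the defaults
# restored in the signature.
if __name__ == "__main__":
    demo_config = IBertConfig(quant_mode=True, force_dequant="gelu")
    print(demo_config.model_type, demo_config.hidden_size, demo_config.quant_mode)  # ibert 768 True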
| 15
| 0
|
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(expression, starting_point, variable="x", precision=10**-10, multiplicity=1):
    x = symbols(variable)
    func = lambdify(x, expression)
    diff_function = lambdify(x, diff(expression, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError('Could not find root') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f'{newton_raphson("exp(x) - 1", 10, precision=0.0_0_5)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
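    # Added illustration (hypothetical case, not in the original script): for a
    # repeated root such as x = 1 of (x - 1)**2, passing the known multiplicity
    # restores fast convergence of the modified Newton step.
    print(f'The root of (x - 1)**2 = 0 is {newton_raphson("(x - 1)**2", 3, multiplicity=2)}')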
| 549
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101')
    else:
        raise ValueError('Model name should include either resnet50 or resnet101')
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = 'huggingface/label-files'
        filename = 'coco-detection-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
F"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
F"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ''
    if is_panoptic:
        prefix = 'detr.'
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr')
                and not key.startswith('class_labels_classifier')
                and not key.startswith('bbox_predictor')
            ):
                val = state_dict.pop(key)
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(F"nielsr/{model_name}" )
processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
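# Added usage sketch (illustrative only; the dump folder path below is hypothetical
# and not part of the original script). Programmatic equivalent of the CLI above:
# convert_detr_checkpoint("detr-resnet-50", pytorch_dump_folder_path="/tmp/detr-resnet-50", push_to_hub=False)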
| 549
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
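# Added note (illustrative, not part of the original module): with the lazy module in
# place, `from transformers.models.vit_mae import ViTMAEModel` defers the heavy
# torch-dependent import until the attribute is first accessed, which keeps a plain
# `import transformers` fast.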
| 58
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
@torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
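# Added illustration (toy tensors, hypothetical values, not part of the original
# module): the ONNX-style path flags an image whenever any per-concept cosine
# score exceeds its threshold.
#   cos_dist = torch.tensor([[0.30, 0.10]]); thresholds = torch.tensor([0.25, 0.20])
#   torch.any(cos_dist - thresholds > 0, dim=1)  # -> tensor([True])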
| 142
| 0
|
__UpperCAmelCase = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__UpperCAmelCase = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__UpperCAmelCase = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__UpperCAmelCase = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__UpperCAmelCase = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__UpperCAmelCase = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__UpperCAmelCase = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__UpperCAmelCase = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 703
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    # assumes `nums` is sorted in non-decreasing order
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
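    # Added note (illustrative, not part of the original file): the scan relies on
    # sorted input; unsorted lists should be sorted first (or use a hash map instead).
    print(f'{two_pointer([1, 3, 4, 7, 11], 14) = }')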
| 218
| 0
|
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('.')
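# Added illustration (hypothetical repository layout, not part of the original
# script): for a single file `sorts/bubble_sort.py` the generated markdown is
#
#   ## Sorts
#   * [Bubble Sort](sorts/bubble_sort.py)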
| 649
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
        assert np.abs(expected_image - image).max() < 9e-2
| 62
| 0
|
'''simple docstring'''
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 666
|
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Return the product of the digits of s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
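# Added illustration (toy values, not part of the original solution): the sliding
# window only re-evaluates the running product when the next digit cannot extend
# the current substring, e.g. str_eval("992") == 9 * 9 * 2 == 162.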
| 666
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def a_ ( self ):
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def a_ ( self ):
UpperCamelCase : Any = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.default_image_processor
UpperCamelCase : Optional[int] = prepare_img()
UpperCamelCase : List[str] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
@slow
def a_ ( self ):
UpperCamelCase : Dict = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
UpperCamelCase : List[Any] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
UpperCamelCase : List[str] = prepare_img()
UpperCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = outputs.logits
# verify the logits
UpperCamelCase : Tuple = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=SCREAMING_SNAKE_CASE_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
@slow
def a_ ( self ):
UpperCamelCase : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
UpperCamelCase : List[Any] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
UpperCamelCase : int = prepare_img()
UpperCamelCase : Dict = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = outputs.logits.detach().cpu()
UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ , target_sizes=[(50, 60)] )
UpperCamelCase : Tuple = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
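# --- Illustrative sketch (added; not part of the original test suite) ---
# A minimal, hedged end-to-end example of the post-processing flow exercised
# above. The checkpoint name comes from the tests; the image path is the same
# COCO fixture used by prepare_img() and is otherwise an assumption.
#
#   from PIL import Image
#   import torch
#   from transformers import MobileViTForSemanticSegmentation, MobileViTImageProcessor
#
#   processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
#   model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   # Without target_sizes the maps keep the model's native 32x32 resolution;
#   # with target_sizes they are upsampled to the requested shape.
#   seg = processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[image.size[::-1]])[0]
#   print(seg.shape)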
| 499
|
"""simple docstring"""
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    '''simple docstring'''
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
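# Illustrative usage (added): insertion_sort sorts in place and returns the list,
# e.g. insertion_sort([4, 2, 6, 8, 1, 7]) -> [1, 2, 4, 6, 7, 8].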
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    '''simple docstring'''
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    '''simple docstring'''
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array
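# Illustrative usage (added): heap_sort([4, 2, 6, 8, 1]) -> [1, 2, 4, 6, 8].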
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    '''simple docstring'''
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    '''simple docstring'''
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    '''simple docstring'''
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
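# Illustrative usage (added): sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45])
# returns the fully sorted list; inputs at or below size_threshold (16) fall through
# to a single insertion_sort pass with no quicksort/heapsort recursion.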
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    '''simple docstring'''
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 499
| 1
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2
    print("""f(x) = x^3 + x^2""")
    print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
while i <= 100_000:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
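# For reference (computed analytically here, not taken from the original file):
# the loop above converges to the exact value of the integral of |x^3 + x^2|
# over [-5, 5], which is 938/3 ≈ 312.67.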
| 45
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximate the curve as a sequence of linear segments and sum their lengths
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x: float) -> float:
        return math.sin(10 * x)
    print("""f(x) = sin(10 * x)""")
    print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
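# For reference (added): the exact arc length is the integral of
# sqrt(1 + (10 * cos(10 * x))**2) over [-10, 10], an elliptic integral with no
# elementary closed form, so the numeric sweep above is a reasonable way to evaluate it.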
| 45
| 1
|
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = """src/diffusers"""
# Matches is_xxx_available()
_re_backend = re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
    """simple docstring"""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
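# Illustrative behavior (added, derived from the regex above): on a line such as
#   "if not is_torch_available():"
# find_backend returns "torch"; a line testing two backends, e.g.
#   "if not (is_torch_available() and is_transformers_available()):"
# yields "torch_and_transformers".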
def read_init():
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS, '__init__.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith('else:'):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
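# Illustrative output (added; the object name is an assumption): for a mixed-case
# class name, create_dummy_object("UNet2DModel", '["torch"]') renders DUMMY_CLASS as roughly
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])
#       ...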
def create_dummy_files(backend_specific_objects=None):
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # Special correspondence from a backend to the module name used in requires_backends
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('_and_')) + ''']'''
        dummy_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    """simple docstring"""
    dummy_files = create_dummy_files()
    # Special correspondence from a backend to its shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'''torch''': '''pt'''}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, 'utils')
    dummy_file_paths = {
        backend: os.path.join(path, F'''dummy_{short_names.get(backend, backend)}_objects.py''')
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, 'r', encoding='utf-8', newline='\n') as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''''''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F'''Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main '''
                    '__init__ has new objects.')
                with open(dummy_file_paths[backend], 'w', encoding='utf-8', newline='\n') as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    F'''diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` '''
                    'to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
    check_dummies(args.fix_and_overwrite)
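# Typical invocations (from the repo root), mirroring the header comment at the top:
#   python utils/check_dummies.py                      # verify only; raises on mismatch
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate the dummy files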
| 589
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=False , ) -> int:
'''simple docstring'''
snake_case_ : int = size if size is not None else {'''height''': 20, '''width''': 20}
snake_case_ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
snake_case_ : str = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Union[str, Any] = min_resolution
snake_case_ : Tuple = max_resolution
snake_case_ : str = do_resize
snake_case_ : Tuple = size
snake_case_ : int = do_center_crop
snake_case_ : Tuple = crop_size
snake_case_ : int = do_normalize
snake_case_ : Optional[Any] = image_mean
snake_case_ : List[str] = image_std
snake_case_ : str = do_reduce_labels
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    """simple docstring"""
    dataset = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''')
    image = Image.open(dataset[0]['''file'''])
    map = Image.open(dataset[1]['''file'''])
    return image, map
def prepare_semantic_batch_inputs():
    """simple docstring"""
    ds = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''')
    image1 = Image.open(ds[0]['''file'''])
    map1 = Image.open(ds[1]['''file'''])
    image2 = Image.open(ds[2]['''file'''])
    map2 = Image.open(ds[3]['''file'''])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = BeitImageProcessor if is_vision_available() else None
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = BeitImageProcessingTester(self )
@property
def lowerCamelCase (self ) -> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''size''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
snake_case_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__magic_name__ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Optional[int] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : List[str] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
snake_case_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
snake_case_ , snake_case_ : Optional[int] = prepare_semantic_single_inputs()
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
snake_case_ , snake_case_ : Dict = prepare_semantic_batch_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
snake_case_ , snake_case_ : Tuple = prepare_semantic_single_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
snake_case_ : List[Any] = True
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
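# --- Illustrative sketch (added; not part of the original tests) ---
# With do_reduce_labels=True the processor maps background label 0 to 255 (the
# ignore index) and shifts the remaining labels down by one, which is why the
# assertions above allow a maximum of 255. A minimal, assumed example:
#
#   processor = BeitImageProcessor(do_reduce_labels=True)
#   encoding = processor(image, segmentation_map, return_tensors="pt")
#   # encoding["labels"] now holds values in [0, 149] plus 255 for background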
| 60
| 0
|
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class a ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = CanineTokenizer
lowerCamelCase :Tuple = False
def UpperCAmelCase ( self ) -> Dict:
super().setUp()
        tokenizer = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase ( self ) -> Union[str, Any]:
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> CanineTokenizer:
_A = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
_A = 10_24
return tokenizer
@require_torch
def UpperCAmelCase ( self ) -> int:
_A = self.canine_tokenizer
_A = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
_A = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
_A = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="""pt""" )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
_A = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.canine_tokenizer
        _A = ["""Once there was a man.""", """He wrote a test in HuggingFace Transformers."""]
_A = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , lowerCAmelCase_ )
self.assertIn("""attention_mask""" , lowerCAmelCase_ )
self.assertIn("""token_type_ids""" , lowerCAmelCase_ )
@require_torch
def UpperCAmelCase ( self ) -> Any:
_A = self.canine_tokenizer
_A = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
_A = tokenizer(
text_target=lowerCAmelCase_ , max_length=32 , padding="""max_length""" , truncation=lowerCAmelCase_ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def UpperCAmelCase ( self ) -> int:
# safety check on max_len default value so we are sure the test works
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_A = tempfile.mkdtemp()
_A = """ He is very happy, UNwant\u00E9d,running"""
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_A = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
shutil.rmtree(lowerCAmelCase_ )
_A = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_A = tempfile.mkdtemp()
_A = """ He is very happy, UNwant\u00E9d,running"""
_A = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_A = chr(0xe_007 )
additional_special_tokens.append(lowerCAmelCase_ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_A = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertIn(lowerCAmelCase_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A , _A = self.get_clean_sequence(lowerCAmelCase_ )
# a special token for Canine can be defined as follows:
_A = 0xe_005
_A = chr(lowerCAmelCase_ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
_A = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase_ )
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , input_encoded + special_token_id )
_A = tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase ( self ) -> int:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = chr(0xe_005 )
_A = chr(0xe_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
_A = tokenizer.tokenize(lowerCAmelCase_ )
_A = tokenizer.tokenize(lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
self.assertEqual(token_a[0] , lowerCAmelCase_ )
self.assertEqual(token_a[0] , lowerCAmelCase_ )
@require_tokenizers
def UpperCAmelCase ( self ) -> Any:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
_A = 0xe_006
_A = chr(lowerCAmelCase_ )
_A = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCAmelCase_ )
tokenizer.from_pretrained(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
_A = json.load(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
_A = json.load(lowerCAmelCase_ )
# a special token for Canine can be defined as follows:
_A = 0xe_006
_A = chr(lowerCAmelCase_ )
_A = [new_token_a]
_A = [new_token_a]
with open(os.path.join(lowerCAmelCase_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_A = tokenizer_class.from_pretrained(lowerCAmelCase_ , extra_ids=0 )
self.assertIn(lowerCAmelCase_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
_A = 0xe_007
_A = chr(lowerCAmelCase_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_A = [AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )]
_A = tokenizer_class.from_pretrained(
lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , extra_ids=0 )
self.assertIn(lowerCAmelCase_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = """hello world"""
if self.space_between_special_tokens:
_A = """[CLS] hello world [SEP]"""
else:
_A = input
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCAmelCase_ , [output, output.lower()] )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_A = """a"""
_A = ord(lowerCAmelCase_ )
for attr in attributes_list:
setattr(lowerCAmelCase_ , attr + """_id""" , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + """_id""" ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , attr + """_id""" , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + """_id""" ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens_ids""" ) , [] )
_A = 0xe_006
_A = chr(lowerCAmelCase_ )
setattr(lowerCAmelCase_ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def UpperCAmelCase ( self ) -> List[str]:
pass
def UpperCAmelCase ( self ) -> Tuple:
pass
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
pass
def UpperCAmelCase ( self ) -> Any:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
pass
def UpperCAmelCase ( self ) -> List[Any]:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
pass
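# --- Illustrative sketch (added; not part of the original tests) ---
# CANINE tokenizes at the Unicode code-point level, so input ids are essentially
# ord() values wrapped in special tokens (CLS = 57344 and SEP = 57345, as in the
# expected ids above):
#
#   tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
#   tokenizer("hi").input_ids  # -> [57344, 104, 105, 57345], i.e. [CLS, ord("h"), ord("i"), SEP]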
| 83
|
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
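# Illustrative round trip (added): encrypt("SOS") -> "... --- ..." and
# decrypt("... --- ...") -> "SOS".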
def main() -> None:
    message = """Morse code here!"""
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 83
| 1
|
from math import pi, sqrt
def gamma(num: float) -> float:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("""math domain error""")
    if num > 171.5:
        raise OverflowError("""math range error""")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
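# Illustrative values (added): gamma(4) = 3! = 6.0, and
# gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) ≈ 3.3234.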
def test_gamma() -> None:
    '''simple docstring'''
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('''Gamma of: '''))
print(f'''gamma({num}) = {gamma(num)}''')
print('''\nEnter 0 to exit...''')
| 36
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    '''simple docstring'''
    def __init__(self, model):
        '''simple docstring'''
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
    def forward(self):
        '''simple docstring'''
        pass
def convert_longformer_qa_checkpoint_to_pytorch(longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str) -> None:
    '''simple docstring'''
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("""cpu"""))
    lightning_model.load_state_dict(ckpt["""state_dict"""])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase : List[str] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
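# Illustrative invocation (added; the script filename and argument values are assumptions):
#   python convert_longformer_qa_checkpoint_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./lightning.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa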
| 36
| 1
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
__lowerCamelCase = logging.getLogger(__name__)
__lowerCamelCase = {'facebook/bart-base': BartForConditionalGeneration}
__lowerCamelCase = {'facebook/bart-base': BartTokenizer}
def parse_args():
"""simple docstring"""
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''')
parser.add_argument(
'''--validation_file''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=lowerCAmelCase_ , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=lowerCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=lowerCAmelCase_ , )
parser.add_argument(
'''--config_name''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=lowerCAmelCase_ , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help='''Where to store the final ONNX file.''' )
    args = parser.parse_args()
return args
def load_model_tokenizer(model_name, device="cpu"):
    """simple docstring"""
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
"""simple docstring"""
model.eval()
_a : str = None
_a : Union[str, Any] = torch.jit.script(BARTBeamSearchGenerator(lowerCAmelCase_ ) )
with torch.no_grad():
_a : int = "My friends are cool but they eat too many carbs."
_a : Dict = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='''pt''' ).to(model.device )
_a : Dict = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=lowerCAmelCase_ , max_length=lowerCAmelCase_ , early_stopping=lowerCAmelCase_ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
lowerCAmelCase_ , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , lowerCAmelCase_ , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=lowerCAmelCase_ , )
logger.info('''Model exported to {}'''.format(lowerCAmelCase_ ) )
_a : str = remove_dup_initializers(os.path.abspath(lowerCAmelCase_ ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(lowerCAmelCase_ ) )
_a : Tuple = onnxruntime.InferenceSession(lowerCAmelCase_ )
_a : int = ort_sess.run(
lowerCAmelCase_ , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(lowerCAmelCase_ ),
'''max_length''': np.array(lowerCAmelCase_ ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
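# --- Illustrative follow-up (added; not in the original script) ---
# The exported graph can later be reloaded with onnxruntime alone:
#
#   import onnxruntime
#   sess = onnxruntime.InferenceSession("BART.onnx")  # assumed default output path from main()
#   # feed input_ids / attention_mask / num_beams / max_length / decoder_start_token_id
#   # exactly as in the validation run above.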
def main():
    """simple docstring"""
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''')
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info('''Exporting model to ONNX''')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 707
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__lowerCamelCase = logging.get_logger(__name__)
# General docstring
__lowerCamelCase = 'ResNetConfig'
# Base docstring
__lowerCamelCase = 'microsoft/resnet-50'
__lowerCamelCase = [1, 2_048, 7, 7]
# Image classification docstring
__lowerCamelCase = 'microsoft/resnet-50'
__lowerCamelCase = 'tiger cat'
__lowerCamelCase = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase = 3 , lowercase = 1 , lowercase = "relu" ) -> str:
super().__init__()
_a : str = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
_a : Optional[Any] = nn.BatchNormad(lowercase )
_a : int = ACTaFN[activation] if activation is not None else nn.Identity()
def snake_case__( self , lowercase ) -> Tensor:
_a : Union[str, Any] = self.convolution(lowercase )
_a : List[str] = self.normalization(lowercase )
_a : List[str] = self.activation(lowercase )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase ) -> Optional[int]:
super().__init__()
_a : int = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_a : Union[str, Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_a : Union[str, Any] = config.num_channels
def snake_case__( self , lowercase ) -> Tensor:
_a : Any = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
_a : Any = self.embedder(lowercase )
_a : Optional[int] = self.pooler(lowercase )
return embedding
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase = 2 ) -> Dict:
super().__init__()
_a : str = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
_a : Union[str, Any] = nn.BatchNormad(lowercase )
def snake_case__( self , lowercase ) -> Tensor:
_a : Optional[int] = self.convolution(lowercase )
_a : Any = self.normalization(lowercase )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase = 1 , lowercase = "relu" ) -> List[str]:
super().__init__()
_a : List[str] = in_channels != out_channels or stride != 1
_a : List[Any] = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_a : List[str] = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
_a : Dict = ACTaFN[activation]
def snake_case__( self , lowercase ) -> Optional[int]:
_a : List[Any] = hidden_state
_a : Optional[Any] = self.layer(lowercase )
_a : int = self.shortcut(lowercase )
hidden_state += residual
_a : Dict = self.activation(lowercase )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase = 1 , lowercase = "relu" , lowercase = 4 ) -> Dict:
super().__init__()
_a : Union[str, Any] = in_channels != out_channels or stride != 1
_a : Union[str, Any] = out_channels // reduction
_a : List[str] = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_a : Dict = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
_a : List[str] = ACTaFN[activation]
def snake_case__( self , lowercase ) -> str:
_a : List[str] = hidden_state
_a : Optional[int] = self.layer(lowercase )
_a : Any = self.shortcut(lowercase )
hidden_state += residual
_a : Union[str, Any] = self.activation(lowercase )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , ) -> Optional[int]:
super().__init__()
_a : List[str] = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
_a : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) , *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def snake_case__( self , lowercase ) -> Tensor:
_a : Optional[int] = input
for layer in self.layers:
_a : Any = layer(lowercase )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase ) -> Any:
super().__init__()
_a : Tuple = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_a : str = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )
def snake_case__( self , lowercase , lowercase = False , lowercase = True ) -> BaseModelOutputWithNoAttention:
_a : str = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_a : Dict = hidden_states + (hidden_state,)
_a : List[str] = stage_module(lowercase )
if output_hidden_states:
_a : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=lowercase , )
class UpperCamelCase_ ( UpperCamelCase ):
lowercase = ResNetConfig
lowercase = '''resnet'''
lowercase = '''pixel_values'''
lowercase = True
def snake_case__( self , lowercase ) -> Any:
if isinstance(lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def snake_case__( self , lowercase , lowercase=False ) -> int:
if isinstance(lowercase , lowercase ):
_a : List[str] = value
__lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , UpperCamelCase , )
class UpperCamelCase_ ( UpperCamelCase ):
def __init__( self , lowercase ) -> int:
super().__init__(lowercase )
_a : Any = config
_a : Optional[int] = ResNetEmbeddings(lowercase )
_a : Any = ResNetEncoder(lowercase )
_a : Dict = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case__( self , lowercase , lowercase = None , lowercase = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_a : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
_a : Optional[Any] = self.embedder(lowercase )
_a : Tuple = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_a : str = encoder_outputs[0]
_a : str = self.pooler(lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , UpperCamelCase , )
class UpperCamelCase_ ( UpperCamelCase ):
def __init__( self , lowercase ) -> str:
super().__init__(lowercase )
_a : str = config.num_labels
_a : List[str] = ResNetModel(lowercase )
# classification head
_a : Optional[int] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case__( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , ) -> ImageClassifierOutputWithNoAttention:
_a : Dict = return_dict if return_dict is not None else self.config.use_return_dict
_a : str = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_a : int = outputs.pooler_output if return_dict else outputs[1]
_a : str = self.classifier(lowercase )
_a : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_a : Any = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_a : Optional[Any] = '''single_label_classification'''
else:
_a : Optional[Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
_a : Optional[Any] = MSELoss()
if self.num_labels == 1:
_a : Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_a : List[str] = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_a : str = CrossEntropyLoss()
_a : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_a : List[Any] = BCEWithLogitsLoss()
_a : List[Any] = loss_fct(lowercase , lowercase )
if not return_dict:
_a : str = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , UpperCamelCase , )
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase ):
def __init__( self , lowercase ) -> str:
super().__init__(lowercase )
super()._init_backbone(lowercase )
_a : Optional[int] = [config.embedding_size] + config.hidden_sizes
_a : Any = ResNetEmbeddings(lowercase )
_a : List[str] = ResNetEncoder(lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
def snake_case__( self , lowercase , lowercase = None , lowercase = None ) -> BackboneOutput:
_a : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_a : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : List[Any] = self.embedder(lowercase )
_a : Tuple = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_a : str = outputs.hidden_states
_a : Tuple = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_a : Dict = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase , )
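# A minimal consumption sketch for the backbone above (the class and config
# names are the usual public transformers ones, assumed here for illustration
# since this snippet uses placeholder identifiers):
#
#     from transformers import ResNetConfig, ResNetBackbone
#     config = ResNetConfig(out_features=["stage2", "stage4"])
#     backbone = ResNetBackbone(config)
#     outputs = backbone(pixel_values)        # pixel_values: (batch, 3, H, W)
#     feature_maps = outputs.feature_maps     # one tensor per requested stage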
| 307
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "instructblip_vision_model"
def __init__( self : str , _A : Dict=1408 , _A : Union[str, Any]=6144 , _A : Union[str, Any]=39 , _A : int=16 , _A : Dict=224 , _A : Dict=14 , _A : Any="gelu" , _A : Dict=1e-6 , _A : List[str]=0.0 , _A : List[str]=1e-10 , _A : List[str]=True , **_A : Dict , ):
super().__init__(**_A )
_UpperCamelCase = hidden_size
_UpperCamelCase = intermediate_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = patch_size
_UpperCamelCase = image_size
_UpperCamelCase = initializer_range
_UpperCamelCase = attention_dropout
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = hidden_act
_UpperCamelCase = qkv_bias
@classmethod
def UpperCamelCase_ ( cls : Dict , _A : Union[str, os.PathLike] , **_A : Union[str, Any] ):
cls._set_token_in_kwargs(_A )
_UpperCamelCase , _UpperCamelCase = cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
_UpperCamelCase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "instructblip_qformer"
def __init__( self : List[str] , _A : int=3_0522 , _A : Tuple=768 , _A : List[str]=12 , _A : List[Any]=12 , _A : Any=3072 , _A : int="gelu" , _A : Union[str, Any]=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Any=0.02 , _A : Optional[int]=1e-12 , _A : Tuple=0 , _A : Any="absolute" , _A : Dict=2 , _A : Dict=1408 , **_A : Optional[Any] , ):
super().__init__(pad_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = cross_attention_frequency
_UpperCamelCase = encoder_hidden_size
@classmethod
def UpperCamelCase_ ( cls : List[str] , _A : Union[str, os.PathLike] , **_A : Tuple ):
cls._set_token_in_kwargs(_A )
_UpperCamelCase , _UpperCamelCase = cls.get_config_dict(_A , **_A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
_UpperCamelCase = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "instructblip"
UpperCAmelCase = True
def __init__( self : Tuple , _A : List[str]=None , _A : str=None , _A : Optional[int]=None , _A : Any=32 , **_A : Dict ):
super().__init__(**_A )
if vision_config is None:
_UpperCamelCase = {}
logger.info('''vision_config is None. Initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
_UpperCamelCase = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
_UpperCamelCase = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
_UpperCamelCase = InstructBlipVisionConfig(**_A )
_UpperCamelCase = InstructBlipQFormerConfig(**_A )
_UpperCamelCase = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
_UpperCamelCase = CONFIG_MAPPING[text_model_type](**_A )
_UpperCamelCase = self.text_config.tie_word_embeddings
_UpperCamelCase = self.text_config.is_encoder_decoder
_UpperCamelCase = num_query_tokens
_UpperCamelCase = self.vision_config.hidden_size
_UpperCamelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_UpperCamelCase = 1.0
_UpperCamelCase = 0.02
@classmethod
def UpperCamelCase_ ( cls : str , _A : InstructBlipVisionConfig , _A : InstructBlipQFormerConfig , _A : PretrainedConfig , **_A : Tuple , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_A , )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = copy.deepcopy(self.__dict__ )
_UpperCamelCase = self.vision_config.to_dict()
_UpperCamelCase = self.qformer_config.to_dict()
_UpperCamelCase = self.text_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
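# A minimal composition sketch for the three configs above (the public class
# names follow the `model_type` strings; the override values are arbitrary
# examples):
#
#     from transformers import (
#         InstructBlipConfig,
#         InstructBlipQFormerConfig,
#         InstructBlipVisionConfig,
#         OPTConfig,
#     )
#     config = InstructBlipConfig.from_vision_qformer_text_configs(
#         InstructBlipVisionConfig(hidden_size=1408),
#         InstructBlipQFormerConfig(vocab_size=30522),
#         OPTConfig(),
#     )
#     config.to_dict()  # nests vision_config, qformer_config and text_config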
| 10
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = ShapEPipeline
UpperCAmelCase = ["prompt"]
UpperCAmelCase = ["prompt"]
UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
_UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
_UpperCamelCase = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 10
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def UpperCAmelCase ( snake_case : Tuple ):
_lowerCAmelCase:Tuple = 384
if "tiny" in model_name:
_lowerCAmelCase:Any = [3, 3, 9, 3]
_lowerCAmelCase:Union[str, Any] = [96, 192, 384, 768]
if "small" in model_name:
_lowerCAmelCase:List[str] = [3, 3, 27, 3]
_lowerCAmelCase:Any = [96, 192, 384, 768]
if "base" in model_name:
_lowerCAmelCase:int = [3, 3, 27, 3]
_lowerCAmelCase:int = [128, 256, 512, 1024]
_lowerCAmelCase:Any = 512
if "large" in model_name:
_lowerCAmelCase:Optional[int] = [3, 3, 27, 3]
_lowerCAmelCase:List[str] = [192, 384, 768, 1536]
_lowerCAmelCase:List[Any] = 768
if "xlarge" in model_name:
_lowerCAmelCase:str = [3, 3, 27, 3]
_lowerCAmelCase:Optional[int] = [256, 512, 1024, 2048]
_lowerCAmelCase:Optional[int] = 1024
# set label information
_lowerCAmelCase:Tuple = 150
_lowerCAmelCase:int = '''huggingface/label-files'''
_lowerCAmelCase:Optional[int] = '''ade20k-id2label.json'''
_lowerCAmelCase:List[str] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) )
_lowerCAmelCase:Dict = {int(k ): v for k, v in idalabel.items()}
_lowerCAmelCase:Tuple = {v: k for k, v in idalabel.items()}
_lowerCAmelCase:str = ConvNextConfig(
depths=snake_case , hidden_sizes=snake_case , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_lowerCAmelCase:Union[str, Any] = UperNetConfig(
backbone_config=snake_case , auxiliary_in_channels=snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case , )
return config
def UpperCAmelCase ( snake_case : List[Any] ):
_lowerCAmelCase:List[str] = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def UpperCAmelCase ( snake_case : int , snake_case : Tuple , snake_case : Optional[int] ):
_lowerCAmelCase:int = dct.pop(snake_case )
_lowerCAmelCase:Union[str, Any] = val
def UpperCAmelCase ( snake_case : List[str] , snake_case : int , snake_case : Optional[Any] ):
_lowerCAmelCase:Dict = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
_lowerCAmelCase:Optional[Any] = model_name_to_url[model_name]
_lowerCAmelCase:Optional[Any] = torch.hub.load_state_dict_from_url(snake_case , map_location='''cpu''' )['''state_dict''']
_lowerCAmelCase:Tuple = get_upernet_config(snake_case )
_lowerCAmelCase:Union[str, Any] = UperNetForSemanticSegmentation(snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowerCAmelCase:str = state_dict.pop(snake_case )
if "bn" in key:
_lowerCAmelCase:str = key.replace('''bn''' , '''batch_norm''' )
_lowerCAmelCase:Optional[Any] = val
# rename keys
_lowerCAmelCase:int = create_rename_keys(snake_case )
for src, dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
model.load_state_dict(snake_case )
# verify on image
_lowerCAmelCase:Optional[int] = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
_lowerCAmelCase:Optional[int] = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert('''RGB''' )
_lowerCAmelCase:int = SegformerImageProcessor()
_lowerCAmelCase:Optional[int] = processor(snake_case , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
_lowerCAmelCase:str = model(snake_case )
if model_name == "upernet-convnext-tiny":
_lowerCAmelCase:Union[str, Any] = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] )
elif model_name == "upernet-convnext-small":
_lowerCAmelCase:List[str] = torch.tensor(
[[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] )
elif model_name == "upernet-convnext-base":
_lowerCAmelCase:Dict = torch.tensor(
[[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] )
elif model_name == "upernet-convnext-large":
_lowerCAmelCase:Union[str, Any] = torch.tensor(
[[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] )
elif model_name == "upernet-convnext-xlarge":
_lowerCAmelCase:List[str] = torch.tensor(
[[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(snake_case )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F"upernet-convnext-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCamelCase__ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
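# Example invocation of this conversion script (a sketch; the script filename
# and output path below are placeholders):
#
#   python convert_upernet_checkpoint.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path /tmp/upernet-convnext-tiny \
#       --push_to_hub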
| 439
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class a__ ( UpperCamelCase_ ):
snake_case__ = '''bert-generation'''
def __init__( self : Dict ,a__ : str=5_0358 ,a__ : List[str]=1024 ,a__ : int=24 ,a__ : Optional[Any]=16 ,a__ : List[str]=4096 ,a__ : Optional[int]="gelu" ,a__ : str=0.1 ,a__ : Union[str, Any]=0.1 ,a__ : int=512 ,a__ : Dict=0.02 ,a__ : List[Any]=1E-12 ,a__ : List[Any]=0 ,a__ : int=2 ,a__ : str=1 ,a__ : Dict="absolute" ,a__ : int=True ,**a__ : List[str] ,) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=a__ ,bos_token_id=a__ ,eos_token_id=a__ ,**a__)
_lowerCAmelCase:Optional[Any] = vocab_size
_lowerCAmelCase:Union[str, Any] = hidden_size
_lowerCAmelCase:Any = num_hidden_layers
_lowerCAmelCase:int = num_attention_heads
_lowerCAmelCase:int = hidden_act
_lowerCAmelCase:List[Any] = intermediate_size
_lowerCAmelCase:Optional[Any] = hidden_dropout_prob
_lowerCAmelCase:int = attention_probs_dropout_prob
_lowerCAmelCase:Optional[int] = max_position_embeddings
_lowerCAmelCase:Dict = initializer_range
_lowerCAmelCase:Union[str, Any] = layer_norm_eps
_lowerCAmelCase:int = position_embedding_type
_lowerCAmelCase:Tuple = use_cache
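# A minimal instantiation sketch (publicly this config is exposed as
# `BertGenerationConfig`; the overrides are arbitrary example values):
#
#     from transformers import BertGenerationConfig
#     config = BertGenerationConfig(vocab_size=50358, hidden_size=1024)
#     assert config.position_embedding_type == "absolute"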
| 439
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,snake_case__=[0.5, 0.5, 0.5] ,snake_case__=[0.5, 0.5, 0.5] ,snake_case__=True ,snake_case__=1 / 255 ,snake_case__=True ,):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE_ : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : str = batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : Tuple = min_resolution
SCREAMING_SNAKE_CASE_ : Any = max_resolution
SCREAMING_SNAKE_CASE_ : str = do_resize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size
SCREAMING_SNAKE_CASE_ : Tuple = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_std
SCREAMING_SNAKE_CASE_ : Tuple = do_rescale
SCREAMING_SNAKE_CASE_ : Any = rescale_factor
SCREAMING_SNAKE_CASE_ : Any = do_pad
def snake_case ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case ( self ,snake_case__ ,snake_case__=False ):
if not batched:
SCREAMING_SNAKE_CASE_ : List[str] = image_inputs[0]
if isinstance(snake_case__ ,Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_ : Dict = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE_ : str = self.size['shortest_edge']
SCREAMING_SNAKE_CASE_ : Optional[int] = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE_ : int = self.size['shortest_edge']
SCREAMING_SNAKE_CASE_ : List[Any] = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = max(snake_case__ ,key=lambda snake_case__ : item[0] )[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = max(snake_case__ ,key=lambda snake_case__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = DetaImageProcessor if is_vision_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DetaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'image_mean' ) )
self.assertTrue(hasattr(snake_case__ ,'image_std' ) )
self.assertTrue(hasattr(snake_case__ ,'do_normalize' ) )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'do_rescale' ) )
self.assertTrue(hasattr(snake_case__ ,'do_pad' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,snake_case__ )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def snake_case ( self ):
# prepare image and target
SCREAMING_SNAKE_CASE_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Any = {'image_id': 39769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE_ : int = DetaImageProcessor()
SCREAMING_SNAKE_CASE_ : int = image_processing(images=snake_case__ ,annotations=snake_case__ ,return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,snake_case__ ,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,snake_case__ ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : int = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,snake_case__ ,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : int = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,snake_case__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,snake_case__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,snake_case__ ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,snake_case__ ) )
# verify size
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,snake_case__ ) )
@slow
def snake_case ( self ):
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
SCREAMING_SNAKE_CASE_ : List[str] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE_ : Any = DetaImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(images=snake_case__ ,annotations=snake_case__ ,masks_path=snake_case__ ,return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,snake_case__ ,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,snake_case__ ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,snake_case__ ,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,snake_case__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,snake_case__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,snake_case__ ) )
# verify masks
SCREAMING_SNAKE_CASE_ : Any = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,snake_case__ )
# verify orig_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,snake_case__ ) )
# verify size
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,snake_case__ ) )
| 105
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __lowerCamelCase ( __a :int ) -> int:
"""simple docstring"""
A__ = 3_8_4
if "tiny" in model_name:
A__ = [3, 3, 9, 3]
A__ = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
A__ = [3, 3, 2_7, 3]
A__ = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
A__ = [3, 3, 2_7, 3]
A__ = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
A__ = 5_1_2
if "large" in model_name:
A__ = [3, 3, 2_7, 3]
A__ = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
A__ = 7_6_8
if "xlarge" in model_name:
A__ = [3, 3, 2_7, 3]
A__ = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
A__ = 1_0_2_4
# set label information
A__ = 1_5_0
A__ = """huggingface/label-files"""
A__ = """ade20k-id2label.json"""
A__ = json.load(open(hf_hub_download(__a , __a , repo_type="""dataset""" ) , """r""" ) )
A__ = {int(k ): v for k, v in idalabel.items()}
A__ = {v: k for k, v in idalabel.items()}
A__ = ConvNextConfig(
depths=__a , hidden_sizes=__a , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
A__ = UperNetConfig(
backbone_config=__a , auxiliary_in_channels=__a , num_labels=__a , idalabel=__a , labelaid=__a , )
return config
def __lowerCamelCase ( __a :Union[str, Any] ) -> List[Any]:
"""simple docstring"""
A__ = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __lowerCamelCase ( __a :Union[str, Any] , __a :List[str] , __a :str ) -> str:
"""simple docstring"""
A__ = dct.pop(__a )
A__ = val
def __lowerCamelCase ( __a :Any , __a :int , __a :Any ) -> List[Any]:
"""simple docstring"""
A__ = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
A__ = model_name_to_url[model_name]
A__ = torch.hub.load_state_dict_from_url(__a , map_location="""cpu""" )["""state_dict"""]
A__ = get_upernet_config(__a )
A__ = UperNetForSemanticSegmentation(__a )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A__ = state_dict.pop(__a )
if "bn" in key:
A__ = key.replace("""bn""" , """batch_norm""" )
A__ = val
# rename keys
A__ = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a , __a , __a )
model.load_state_dict(__a )
# verify on image
A__ = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
A__ = Image.open(requests.get(__a , stream=__a ).raw ).convert("""RGB""" )
A__ = SegformerImageProcessor()
A__ = processor(__a , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
A__ = model(__a )
if model_name == "upernet-convnext-tiny":
A__ = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
A__ = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
A__ = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
A__ = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
A__ = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __a , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(__a )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A : str = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 176
| 0
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class UpperCamelCase (unittest.TestCase ):
def __init__( self :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :List[Any]=13 , __magic_name__ :List[str]=30 , __magic_name__ :List[str]=2 , __magic_name__ :Dict=3 , __magic_name__ :Optional[int]=True , __magic_name__ :int=True , __magic_name__ :Optional[int]=32 , __magic_name__ :Dict=5 , __magic_name__ :Optional[int]=4 , __magic_name__ :List[str]=37 , __magic_name__ :Any="gelu" , __magic_name__ :List[str]=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :int=10 , __magic_name__ :List[Any]=0.02 , ) ->str:
lowercase : List[str] = parent
lowercase : List[str] = batch_size
lowercase : List[str] = image_size
lowercase : Optional[int] = patch_size
lowercase : str = num_channels
lowercase : Optional[Any] = is_training
lowercase : Union[str, Any] = use_labels
lowercase : Union[str, Any] = hidden_size
lowercase : Optional[int] = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : str = intermediate_size
lowercase : int = hidden_act
lowercase : Union[str, Any] = hidden_dropout_prob
lowercase : List[Any] = attention_probs_dropout_prob
lowercase : Union[str, Any] = type_sequence_label_size
lowercase : Optional[Any] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase : Union[str, Any] = (image_size // patch_size) ** 2
lowercase : Optional[Any] = num_patches + 1
def __snake_case ( self :List[str] ) ->Dict:
lowercase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Optional[int] = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, pixel_values
def __snake_case ( self :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :Tuple ) ->Union[str, Any]:
lowercase : Dict = FlaxViTModel(config=__magic_name__ )
lowercase : str = model(__magic_name__ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
lowercase : List[Any] = (self.image_size, self.image_size)
lowercase : str = (self.patch_size, self.patch_size)
lowercase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def __snake_case ( self :Optional[Any] , __magic_name__ :Tuple , __magic_name__ :str ) ->Union[str, Any]:
lowercase : Optional[int] = self.type_sequence_label_size
lowercase : Tuple = FlaxViTForImageClassification(config=__magic_name__ )
lowercase : Union[str, Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase : List[Any] = 1
lowercase : int = FlaxViTForImageClassification(__magic_name__ )
lowercase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase : Union[str, Any] = model(__magic_name__ )
def __snake_case ( self :Tuple ) ->int:
lowercase : int = self.prepare_config_and_inputs()
lowercase , lowercase : List[Any] = config_and_inputs
lowercase : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase (__snake_case , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def __snake_case ( self :Optional[int] ) ->None:
lowercase : List[Any] = FlaxViTModelTester(self )
lowercase : Any = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def __snake_case ( self :int ) ->int:
self.config_tester.run_common_tests()
def __snake_case ( self :Optional[int] ) ->List[str]:
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __snake_case ( self :Optional[Any] ) ->Any:
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
def __snake_case ( self :List[str] ) ->Optional[int]:
lowercase , lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Tuple = model_class(__magic_name__ )
lowercase : List[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : List[str] = [*signature.parameters.keys()]
lowercase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __snake_case ( self :str ) ->Optional[Any]:
lowercase , lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ )
lowercase : Optional[Any] = model_class(__magic_name__ )
@jax.jit
def model_jitted(__magic_name__ :int , **__magic_name__ :int ):
return model(pixel_values=__magic_name__ , **__magic_name__ )
with self.subTest("""JIT Enabled""" ):
lowercase : Union[str, Any] = model_jitted(**__magic_name__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowercase : Dict = model_jitted(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
for jitted_output, output in zip(__magic_name__ , __magic_name__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __snake_case ( self :str ) ->Optional[Any]:
for model_class_name in self.all_model_classes:
lowercase : Union[str, Any] = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
lowercase : int = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__magic_name__ )
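# The JIT test above checks that traced and eager outputs match. A standalone
# sketch of the same pattern with a plain function (purely illustrative):
def _example_jit_consistency():
    import jax
    import jax.numpy as jnp

    def f(x):
        return jnp.tanh(x) * 2.0

    x = jnp.ones((2, 3))
    jitted_out = jax.jit(f)(x)       # compiled path
    with jax.disable_jit():
        eager_out = jax.jit(f)(x)    # falls back to eager execution
    assert jitted_out.shape == eager_out.shape
    return jitted_out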
| 348
|
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
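# Minimal usage sketch (the plaintext and key are arbitrary example values):
if __name__ == "__main__":
    sample = encrypt("HELLOWORLD", 3)  # rows read top to bottom -> "HOLELWRDLO"
    assert decrypt(sample, 3) == "HELLOWORLD"
    assert bruteforce(sample)[3] == "HELLOWORLD"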
| 348
| 1
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__A = logging.get_logger(__name__)
__A = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature extractor class from its (string) class name."""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    """Load the feature extractor configuration dict from a local path or the Hub."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """Generic class that instantiates the correct feature extractor via `from_pretrained`."""

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for the given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
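# --- Usage sketch (added; not part of the original module). A minimal, hedged example of
# the API defined above; the checkpoint name is an assumption chosen for illustration and
# requires access to the Hugging Face Hub.
#
#   from transformers import AutoFeatureExtractor
#
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   print(type(extractor).__name__)  # expected: Wav2Vec2FeatureExtractor, per the mapping above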
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the filesystem protocol prefix (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` is a remote (non-local) filesystem."""
    return fs is not None and fs.protocol != "file"
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """Move `src` to `dst` on the given filesystem."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop references so forked processes do not hang on a stale lock."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
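# --- Usage sketch (added; not part of the original module), exercising the helpers above
# without any remote storage:
#
#   assert extract_path_from_uri("s3://my-bucket/data") == "my-bucket/data"
#   local_fs = fsspec.filesystem("file")
#   assert is_remote_filesystem(local_fs) is False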
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
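# --- Usage sketch (added; not part of the original test file). A hedged, minimal denoising
# loop with KDPM2DiscreteScheduler; the zero "model output" is a stand-in assumption, not a
# real diffusion model.
#
#   scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1_000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = torch.zeros_like(model_input)  # placeholder model output
#       sample = scheduler.step(noise_pred, t, sample).prev_sample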
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
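# Note (added for clarity; not in the original file): each kernel entry above is the real
# part of the standard Gabor function,
#   g(x', y') = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# where (x', y') are pixel coordinates rotated by `theta` degrees about the kernel center.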
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Fast REALM tokenizer, backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of text candidates, padding every sequence to max_length."""
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding [CLS] and [SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Create token type IDs: 0 for the first sequence, 1 for the (optional) second one."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
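# --- Usage sketch (added; not part of the original module). A hedged example of
# `batch_encode_candidates`; the checkpoint name is an assumption and needs Hub access.
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   candidates = [["Hello world!", "Nice to meet you!"]]
#   batch = tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
#   print(batch["input_ids"].shape)  # (num_examples, num_candidates, max_length)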
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
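# --- Usage sketch (added; not part of the original script). A hedged example invocation;
# the script file name and all paths are placeholders.
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output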
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs)
        result = model(inputs_list)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of `nums`.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
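# --- Usage sketch (added; not part of the original module): build a default config and
# inspect the derived stage names.
#
#   config = BitConfig(layer_type="bottleneck", global_padding="SAME")
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']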
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Map UTF-8 bytes to printable unicode characters for byte-level BPE."""
    bs = (
        list(range(ord("""!"""), ord("""~""") + 1)) + list(range(ord("""¡"""), ord("""¬""") + 1)) + list(range(ord("""®"""), ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        with open(vocab_file, encoding="""utf-8""") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="""utf-8""") as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("""inf""")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = """ """.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(""" """))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = """""".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("""utf-8""", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""]
        )

        with open(vocab_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + """\n""")

        index = 0
        with open(merge_file, """w""", encoding="""utf-8""") as writer:
            writer.write("""#version: 0.2\n""")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!"""
                    )
                    index = token_index
                writer.write(""" """.join(bpe_tokens) + """\n""")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("""add_prefix_space""", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)

    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = """attention_mask""" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["""global_attention_mask"""]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["""global_attention_mask"""])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["""global_attention_mask"""] = (
                        encoded_inputs["""global_attention_mask"""] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["""global_attention_mask"""] = [-1] * difference + encoded_inputs[
                        """global_attention_mask"""
                    ]
                else:
                    raise ValueError("""Invalid padding strategy:""" + str(self.padding_side))

        return encoded_inputs
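# --- Usage sketch (added; not part of the original module). A hedged example assuming
# local `vocab.json`/`merges.txt` files; the paths are placeholders.
#
#   tokenizer = LEDTokenizer("vocab.json", "merges.txt")
#   ids = tokenizer("Hello world")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))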
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
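
# Minimal sketch of the lazy-import pattern used above (an illustration, not the
# transformers implementation): attribute access triggers the real import, so importing
# the package stays cheap until a symbol is actually needed.
#
#     import importlib, types
#
#     class _LazyDemo(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for module, names in self._import_structure.items():
#                 if attr in names:
#                     return getattr(importlib.import_module(module), attr)
#             raise AttributeError(attr)
#
#     demo = _LazyDemo("demo", {"math": ["sqrt"]})
#     demo.sqrt(4)  # `math` is imported only here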
| 659
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_4c(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
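

# Hedged note on running these suites (assumption: the standard HF test-gating env vars
# apply here): @slow tests are skipped unless RUN_SLOW=1 is set, and @nightly tests
# unless RUN_NIGHTLY=1 is set, e.g.
#   RUN_SLOW=1 python -m pytest -k "ldm3d" <path-to-this-test-file>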
| 659
| 1
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 719
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # Metadata fields whose YAML keys use dashes instead of underscores
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            }, sort_keys=False, allow_unicode=True, encoding="utf-8", ).decode("utf-8")
known_task_ids = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
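
# Hedged usage sketch of the YAML/README round trip (the strings are illustrative,
# not taken from a real dataset card):
#   yaml_block, body = _split_yaml_from_readme("---\npretty_name: Demo\n---\n# Body")
#   assert yaml_block == "pretty_name: Demo" and body == "# Body"
#   metadata = DatasetMetadata.from_yaml_string(yaml_block)
#   assert metadata["pretty_name"] == "Demo"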
| 33
| 0
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
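

# Hedged shape check for pre_process_datasets (toy values; the token ids below are
# arbitrary illustrations, not outputs of the real tokenizer):
#   toy = [([5, 6], [7], [8], 0)]  # (story, cont1, cont2, mc_label)
#   tensors = pre_process_datasets([toy], 8, 3, 1, 2, 3)  # input_len=8, cap_length=3
#   input_ids, mc_token_ids, lm_labels, mc_labels = tensors[0]
#   assert input_ids.shape == (1, 2, 8) and mc_token_ids.shape == (1, 2)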
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps", default=-1, type=int, help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ), )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
a = ["_start_", "_delimiter_", "_classify_"]
a = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(UpperCAmelCase__)
a = tokenizer.convert_tokens_to_ids(UpperCAmelCase__)
a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(UpperCAmelCase__))
model.to(UpperCAmelCase__)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset)
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
# Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
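
# Example fine-tuning invocation (hedged: the CSV file names follow the public
# ROCStories cloze-test release and are assumptions about your local paths):
#   python run_openai_gpt.py --do_train --do_eval \
#       --train_dataset "cloze_test_val__spring2016 - cloze_test_ALL_val.csv" \
#       --eval_dataset "cloze_test_test__spring2016 - cloze_test_ALL_test.csv" \
#       --output_dir ./ft_model --train_batch_size 16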
| 515
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        ((config), (input_ids), (input_mask), (sequence_labels), (token_labels), (choice_labels)) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
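

# Hedged note: the @slow integration test above is gated the usual transformers way
# (assumption: standard test env vars), i.e. it only runs with RUN_SLOW=1, e.g.
#   RUN_SLOW=1 python -m pytest tests/models/distilbert -k "integration"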
| 288
| 0
|
"""simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
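
# Hedged edge-case note (illustrative): non-list sequences also work since only
# sum()/len() are used, e.g. mean((1, 2, 3)) == 2.0, while mean([]) raises ValueError.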
| 710
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100, ) -> float:
    """Approximate the area under fnc between x_start and x_end with the trapezoidal rule."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("""f(x) = x^3 + x^2""")
    print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
    while i <= 100_000:
        print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
        i *= 10
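
# Hedged sanity check (illustrative): the trapezoidal rule is exact for linear
# functions, so trapezoidal_area(lambda x: x, 0, 1, 1) == abs(1 + 0) * 1 / 2 == 0.5.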
| 614
| 0
|