| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82 to 53.2k | int64 0 to 721 | stringlengths 91 to 41.9k | int64 0 to 699 | int64 0 to 1 |
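Each row below pairs a `code` snippet with a `style_context` snippet, plus an integer style id for each and a binary `label`; in every row shown on this page the label is 1 exactly when the two style ids match, though that reading is inferred from the sample rather than stated by the card. Because the viewer flattens the table, the cell boundaries are kept below as `(code_codestyle: N)` and `(style_context_codestyle: N, label: L)` markers. As a minimal sketch of consuming the rows (the hub id `user/code-style-pairs` is a placeholder, since the dataset's actual path is not given here):

```python
from datasets import load_dataset

# Placeholder hub id; the real path of this dataset is not stated in the card.
ds = load_dataset("user/code-style-pairs", split="train")

for row in ds.select(range(3)):
    # In the rows shown in this card, label agrees with a direct id comparison.
    same_style = int(row["code_codestyle"] == row["style_context_codestyle"])
    print(row["label"], same_style)
```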
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
(code_codestyle: 453)
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ) -> float:
if digit_amount > 0:
return round(number - int(lowercase__ ) , lowercase__ )
return number - int(lowercase__ )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
(style_context_codestyle: 453, label: 1)
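The `code` cell of the row just closed is the standard `transformers` lazy-import `__init__`: submodule contents are declared in `_import_structure`, and at runtime the module swaps itself in `sys.modules` for a `_LazyModule` that resolves attributes on first access. A minimal sketch of the underlying idea (not the real `_LazyModule`, which additionally handles `TYPE_CHECKING` and optional backends):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Load submodules on first attribute access instead of at import time."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


# Demo: expose os.path.join lazily; os.path is only imported on first access.
lazy_os = LazyModule("os", {"path": ["join"]})
print(lazy_os.join("a", "b"))
```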
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
(code_codestyle: 376)
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """simple docstring"""
        raise NotImplementedError()
(style_context_codestyle: 376, label: 1)
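The `style_context` cell of this row is an abstract base class for CLI subcommands: each command registers its own parser via a static `register_subcommand` and implements `run`. A minimal, self-contained usage sketch of that pattern (the `greet` command and its flag are invented for illustration):

```python
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()


class GreetCommand(BaseCommand):
    """Hypothetical subcommand, used only to illustrate the pattern."""

    def __init__(self, name: str):
        self.name = name

    @staticmethod
    def register_subcommand(parser):
        sub = parser.add_parser("greet")
        sub.add_argument("--name", default="world")
        # func builds the command instance from the parsed args
        sub.set_defaults(func=lambda args: GreetCommand(args.name))

    def run(self):
        print(f"hello, {self.name}")


if __name__ == "__main__":
    parser = ArgumentParser("cli")
    commands = parser.add_subparsers()
    GreetCommand.register_subcommand(commands)
    args = parser.parse_args(["greet", "--name", "ada"])
    args.func(args).run()
```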
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
(code_codestyle: 546)
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        """simple docstring"""
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        """simple docstring"""
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        """simple docstring"""
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        """simple docstring"""
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        """simple docstring"""
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            """simple docstring"""
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                """simple docstring"""
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise fetch a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
(style_context_codestyle: 546, label: 1)
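The `LocalMetricTest` class in this row runs each metric module through `doctest.testmod(..., raise_on_error=True)`, so the first failing docstring example raises instead of just being tallied in the report. A tiny self-contained illustration of that call (the toy module is invented for the demo):

```python
import doctest
import types

# Build a throwaway module whose docstring carries one passing doctest.
toy = types.ModuleType("toy_metric")
toy.__doc__ = """
>>> 1 + 1
2
"""

results = doctest.testmod(toy, verbose=False, raise_on_error=True)
assert results.failed == 0 and results.attempted == 1
```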
'''simple docstring'''
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    '''simple docstring'''
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        '''simple docstring'''
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        '''simple docstring'''
        pass
(code_codestyle: 717)
'''simple docstring'''
import math


def sieve(n: int) -> list[int]:
    '''Segmented sieve: return all primes up to n.'''
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
(style_context_codestyle: 411, label: 0)
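A quick way to sanity-check the segmented sieve in this row's `style_context` is to compare it against a plain sieve of Eratosthenes on a small bound; this sketch assumes the corrected `sieve` above is defined in the same file:

```python
def simple_sieve(n: int) -> list[int]:
    """Plain sieve of Eratosthenes, used only as a reference."""
    is_prime = [False, False] + [True] * (n - 1)
    for p in range(2, int(n**0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]


assert sieve(1_000) == simple_sieve(1_000)
```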
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Any=99 , UpperCAmelCase_ : Dict=36 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : List[str]=6 , UpperCAmelCase_ : Tuple=6 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=1_000 , ) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Any =parent
lowerCamelCase__: Any =batch_size
lowerCamelCase__: Union[str, Any] =num_channels
lowerCamelCase__: str =image_size
lowerCamelCase__: Any =patch_size
lowerCamelCase__: Optional[Any] =text_seq_length
lowerCamelCase__: Optional[Any] =is_training
lowerCamelCase__: List[str] =use_input_mask
lowerCamelCase__: List[str] =use_token_type_ids
lowerCamelCase__: List[Any] =use_labels
lowerCamelCase__: Tuple =vocab_size
lowerCamelCase__: Optional[Any] =hidden_size
lowerCamelCase__: Optional[int] =num_hidden_layers
lowerCamelCase__: Tuple =num_attention_heads
lowerCamelCase__: List[Any] =intermediate_size
lowerCamelCase__: Optional[Any] =hidden_act
lowerCamelCase__: List[str] =hidden_dropout_prob
lowerCamelCase__: Optional[Any] =attention_probs_dropout_prob
lowerCamelCase__: Any =max_position_embeddings
lowerCamelCase__: str =type_vocab_size
lowerCamelCase__: str =type_sequence_label_size
lowerCamelCase__: Optional[int] =initializer_range
lowerCamelCase__: Union[str, Any] =coordinate_size
lowerCamelCase__: Optional[Any] =shape_size
lowerCamelCase__: int =num_labels
lowerCamelCase__: str =num_choices
lowerCamelCase__: List[Any] =scope
lowerCamelCase__: List[str] =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase__: List[Any] =text_seq_length
lowerCamelCase__: Any =(image_size // patch_size) ** 2 + 1
lowerCamelCase__: int =self.text_seq_length + self.image_seq_length
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
lowerCamelCase__: Any =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__: Optional[int] =bbox[i, j, 3]
lowerCamelCase__: int =bbox[i, j, 1]
lowerCamelCase__: Tuple =t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__: Tuple =bbox[i, j, 2]
lowerCamelCase__: List[str] =bbox[i, j, 0]
lowerCamelCase__: Any =t
lowerCamelCase__: List[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCamelCase__: Optional[int] =None
if self.use_input_mask:
lowerCamelCase__: List[str] =random_attention_mask([self.batch_size, self.text_seq_length])
lowerCamelCase__: Any =None
if self.use_token_type_ids:
lowerCamelCase__: List[Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
lowerCamelCase__: List[Any] =None
lowerCamelCase__: List[str] =None
if self.use_labels:
lowerCamelCase__: List[str] =ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCamelCase__: Optional[Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
lowerCamelCase__: List[Any] =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Dict =LayoutLMvaModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# text + image
lowerCamelCase__: Optional[int] =model(UpperCAmelCase_ , pixel_values=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# text only
lowerCamelCase__: List[str] =model(UpperCAmelCase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
lowerCamelCase__: List[str] =model(pixel_values=UpperCAmelCase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.num_labels
lowerCamelCase__: Optional[int] =LayoutLMvaForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: List[Any] =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =self.num_labels
lowerCamelCase__: List[str] =LayoutLMvaForTokenClassification(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: int =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Tuple =LayoutLMvaForQuestionAnswering(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Tuple =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any]) ->Tuple:
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[Any] =LayoutLMvaModelTester(self)
lowerCamelCase__: Any =ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37)
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict=False) ->Dict:
'''simple docstring'''
lowerCamelCase__: Tuple =copy.deepcopy(UpperCAmelCase_)
if model_class in get_values(UpperCAmelCase_):
lowerCamelCase__: Dict ={
k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous()
if isinstance(UpperCAmelCase_ , torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase_):
lowerCamelCase__: List[str] =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
elif model_class in get_values(UpperCAmelCase_):
lowerCamelCase__: Tuple =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
lowerCamelCase__: List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
elif model_class in [
*get_values(UpperCAmelCase_),
]:
lowerCamelCase__: int =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
elif model_class in [
*get_values(UpperCAmelCase_),
]:
lowerCamelCase__: Optional[int] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase_ , )
return inputs_dict
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__: int =type
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : int) ->str:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: List[str] =LayoutLMvaModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase_) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Any =LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.default_image_processor
lowerCamelCase__: Union[str, Any] =prepare_img()
lowerCamelCase__: str =image_processor(images=UpperCAmelCase_ , return_tensors="pt").pixel_values.to(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =torch.tensor([[1, 2]])
lowerCamelCase__: Optional[Any] =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
# forward pass
lowerCamelCase__: List[Any] =model(
input_ids=input_ids.to(UpperCAmelCase_) , bbox=bbox.to(UpperCAmelCase_) , pixel_values=pixel_values.to(UpperCAmelCase_) , )
# verify the logits
lowerCamelCase__: List[Any] =torch.Size((1, 199, 768))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
lowerCamelCase__: List[str] =torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1E-4))
(code_codestyle: 59)
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1_024, 2_048, 4_096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : str ) ->Union[str, Any]:
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case, 'do_normalize' ) )
self.assertTrue(hasattr(_snake_case, 'do_convert_rgb' ) )
def lowercase_ ( self : str ) ->Union[str, Any]:
snake_case__ : List[str] = self.image_processor_tester.prepare_dummy_image()
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
snake_case__ : Optional[int] = 2_0_4_8
snake_case__ : Optional[int] = image_processor(_snake_case, return_tensors='pt', max_patches=_snake_case )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0_6_0_6 ), atol=1e-3, rtol=1e-3 ) )
def lowercase_ ( self : Tuple ) ->Dict:
# Initialize image_processor
snake_case__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Tuple = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, Image.Image )
# Test not batched input
snake_case__ : Optional[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ : Optional[Any] = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
snake_case__ : Any = image_processor(
_snake_case, return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase_ ( self : List[str] ) ->Optional[Any]:
# Initialize image_processor
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, Image.Image )
# Test not batched input
snake_case__ : Optional[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
snake_case__ : Union[str, Any] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_snake_case ):
snake_case__ : int = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case ).flattened_patches
snake_case__ : Optional[Any] = 'Hello'
snake_case__ : Dict = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case, header_text=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
snake_case__ : List[Any] = image_processor(
_snake_case, return_tensors='pt', max_patches=_snake_case, header_text=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase_ ( self : Any ) ->int:
# Initialize image_processor
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case, numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, np.ndarray )
snake_case__ : Union[str, Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ : List[str] = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
snake_case__ : Dict = image_processor(
_snake_case, return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase_ ( self : List[Any] ) ->List[Any]:
# Initialize image_processor
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case, torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, torch.Tensor )
# Test not batched input
snake_case__ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ : Optional[Any] = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
snake_case__ : int = image_processor(
_snake_case, return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Optional[int] ) ->Optional[int]:
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case, 'do_normalize' ) )
self.assertTrue(hasattr(_snake_case, 'do_convert_rgb' ) )
def lowercase_ ( self : Optional[int] ) ->str:
# Initialize image_processor
snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, Image.Image )
# Test not batched input
snake_case__ : List[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ : Any = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
snake_case__ : Dict = image_processor(
_snake_case, return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
(style_context_codestyle: 478, label: 0)
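The Pix2Struct-style tests above repeatedly compute an expected flattened-patch width as patch area times channel count plus 2; the extra two slots presumably carry the patch's row and column indices, which the processor prepends to each flattened patch. A quick arithmetic check of that formula under the tester's defaults (16x16 patches, 3 channels):

```python
patch_size = {"height": 16, "width": 16}
num_channels = 3

# pixels per flattened patch, plus 2 positional slots (row id, column id)
expected_hidden_dim = patch_size["height"] * patch_size["width"] * num_channels + 2
assert expected_hidden_dim == 770
```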
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = [[float('''inf''' ) for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )]
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(_SCREAMING_SNAKE_CASE ):
# looping through rows of graph array
for i in range(_SCREAMING_SNAKE_CASE ):
# looping through columns of graph array
for j in range(_SCREAMING_SNAKE_CASE ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_UpperCAmelCase = dist[i][k] + dist[k][j]
_print_dist(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return dist, v
if __name__ == "__main__":
__A : Union[str, Any] = int(input("Enter number of vertices: "))
__A : Dict = int(input("Enter number of edges: "))
__A : int = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
__A : List[Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
__A : str = int(input("Enter source:"))
__A : int = int(input("Enter destination:"))
__A : List[str] = float(input("Enter weight:"))
__A : Optional[Any] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
(code_codestyle: 95)
"""simple docstring"""
import functools
def lowercase ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(_SCREAMING_SNAKE_CASE ) != 3 or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return 0
if min(_SCREAMING_SNAKE_CASE ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(_SCREAMING_SNAKE_CASE ) >= 366:
raise ValueError('''All days elements should be less than 366''' )
_UpperCAmelCase = set(_SCREAMING_SNAKE_CASE )
@functools.cache
def dynamic_programming(_SCREAMING_SNAKE_CASE : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
(style_context_codestyle: 95, label: 1)
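The driver for `floyd_warshall` in this row is interactive; for testing it is handier to build the adjacency matrix directly. A non-interactive sketch reusing the corrected `floyd_warshall` from this row, on the 3-vertex example spelled out in its trailing comments:

```python
INF = float("inf")

# Adjacency matrix for the commented example: edge 1->2 with weight 2, edge 2->1 with weight 1.
graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]

dist, _ = floyd_warshall(graph, 3)
assert dist[1][2] == 2.0 and dist[2][1] == 1.0
```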
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="m2m100" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=8 , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase__ : Optional[Any] = language_codes
UpperCamelCase__ : str = FAIRSEQ_LANGUAGE_CODES[language_codes]
UpperCamelCase__ : int = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
UpperCamelCase__ : Any = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__SCREAMING_SNAKE_CASE )
for lang_code in fairseq_language_code
if self.get_lang_token(__SCREAMING_SNAKE_CASE ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , language_codes=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : Union[str, Any] = vocab_file
UpperCamelCase__ : Union[str, Any] = load_json(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Union[str, Any] = spm_file
UpperCamelCase__ : Optional[int] = load_spm(__SCREAMING_SNAKE_CASE , self.sp_model_kwargs )
UpperCamelCase__ : Any = len(self.encoder )
UpperCamelCase__ : Any = {
self.get_lang_token(__SCREAMING_SNAKE_CASE ): self.encoder_size + i for i, lang_code in enumerate(__SCREAMING_SNAKE_CASE )
}
UpperCamelCase__ : Dict = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__SCREAMING_SNAKE_CASE )}
UpperCamelCase__ : List[str] = {v: k for k, v in self.lang_token_to_id.items()}
UpperCamelCase__ : List[str] = src_lang if src_lang is not None else '''en'''
UpperCamelCase__ : Any = tgt_lang
UpperCamelCase__ : Optional[Any] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
UpperCamelCase__ : Tuple = num_madeup_words
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
"""simple docstring"""
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder[self.unk_token] )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Optional[int] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
UpperCamelCase__ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) -> Union[str, Any]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = [1] * len(self.prefix_tokens )
UpperCamelCase__ : str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        """simple docstring"""
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(F'''{save_directory} should be a directory''' )
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seq2seq_batch( self , src_texts , src_lang = "en" , tgt_texts = None , tgt_lang = "ro" , **kwargs , ):
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs( self , raw_inputs , src_lang , tgt_lang , **extra_kwargs ):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def _switch_to_input_mode( self ):
        """simple docstring"""
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        """simple docstring"""
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        """simple docstring"""
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang ):
        """simple docstring"""
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self , lang ):
        """simple docstring"""
        return self.lang_code_to_token[lang]
    def get_lang_id( self , lang ):
        """simple docstring"""
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm(path , sp_model_kwargs ):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json(path ):
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json(data , path ):
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
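# Illustrative round trip through the JSON helpers above (a sketch, not part
# of the upstream tokenizer; the file name is an arbitrary assumption):
if __name__ == "__main__":
    save_json({"<s>": 0, "</s>": 2} , "toy_vocab.json" )
    assert load_json("toy_vocab.json" ) == {"<s>": 0, "</s>": 2}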
| 285
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig(BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1_024, 2_048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-3
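# Illustrative usage sketch (not part of the upstream file): building a
# default bottleneck config and requesting specific backbone feature stages.
if __name__ == "__main__":
    cfg = ResNetConfig(out_features=["stage2", "stage4"] )
    print(cfg.model_type , cfg.hidden_sizes , cfg.out_features )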
| 686
| 0
|
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args ):
    """simple docstring"""
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand(BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand(parser: ArgumentParser ):
        """simple docstring"""
        add_new_model_parser = parser.add_parser('''add-new-model''' )
        add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
        add_new_model_parser.add_argument('''--testing_file''' , type=str , help='''Configuration file on which to run.''' )
        add_new_model_parser.add_argument(
            '''--path''' , type=str , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing , testing_file , path=None , *args ):
        """simple docstring"""
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
        """simple docstring"""
        warnings.warn(
            '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
            '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
            '''checks, you should use `transformers-cli add-new-model-like` instead.''' )
        if not _has_cookiecutter:
            raise ImportError(
                '''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
                '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
        if len(directories ) > 0:
            raise ValueError(
                '''Several directories starting with `cookiecutter-template-` in current working directory. '''
                '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
                '''change your working directory.''' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , '''r''' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['''lowercase_modelname''']
        generate_tensorflow_pytorch_and_flax = configuration['''generate_tensorflow_pytorch_and_flax''']
        os.remove(F"""{directory}/configuration.json""" )
        output_pytorch = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
        output_flax = '''Flax''' in generate_tensorflow_pytorch_and_flax
        model_dir = F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=True )
        # Tests require submodules as they have parent imports
        with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
            pass
        shutil.move(
            F"""{directory}/__init__.py""" , F"""{model_dir}/__init__.py""" , )
        shutil.move(
            F"""{directory}/configuration_{lowercase_model_name}.py""" , F"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(path ):
            with open(path , '''r''' ) as f:
                lines = f.readlines()
            with open(path , '''w''' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
        if output_pytorch:
            if not self._testing:
                remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""" )
            shutil.move(
                F"""{directory}/modeling_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
            shutil.move(
                F"""{directory}/test_modeling_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
        else:
            os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""" )
            os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""" )
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
            shutil.move(
                F"""{directory}/modeling_tf_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
            shutil.move(
                F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
        else:
            os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
            os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
        if output_flax:
            if not self._testing:
                remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
            shutil.move(
                F"""{directory}/modeling_flax_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
            shutil.move(
                F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
        else:
            os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
            os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
        shutil.move(
            F"""{directory}/{lowercase_model_name}.md""" , F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
        shutil.move(
            F"""{directory}/tokenization_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
        shutil.move(
            F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp
        def replace(original_file , line_to_copy_below , lines_to_copy ):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh , '''w''' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(F"""Line {line_to_copy_below} was not found in file.""" )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('''"''' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('''"''' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
        replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(directory )
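# Illustrative invocation of the (deprecated) command above, as a sketch:
#   transformers-cli add-new-model
# runs cookiecutter interactively and then moves the generated configuration,
# modeling, tokenizer, test and doc files into the source tree as shown above.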
| 706
|
"""simple docstring"""
def selection_sort(collection ):
    """simple docstring"""
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
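# Illustrative sanity check (not part of the original file): selection sort
# runs in O(n^2) comparisons, sorts in place and returns the same list.
assert selection_sort([3, 1, 2] ) == [1, 2, 3]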
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(selection_sort(unsorted))
| 361
| 0
|
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """simple docstring"""
    def __init__( self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution( self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
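# Illustrative expected result (hand-checked, not from the original file):
# for the default signals [2, 1, 2, -1] and [1, 2, 3, 4],
# CircularConvolution().circular_convolution() yields [10.0, 10.0, 6.0, 14.0].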
if __name__ == "__main__":
doctest.testmod()
| 81
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
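# A note on the lazy-module pattern above (illustrative, not from the file):
# importing the package stays cheap, and the torch-backed submodule is only
# loaded when one of its attributes is first accessed, e.g.
#   from transformers.models.autoformer import AutoformerConfig  # no torch yet
#   from transformers.models.autoformer import AutoformerModel   # loads modeling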
| 293
| 0
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program , timeout , task_id , completion_id ):
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append('''timed out''' )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program , result , timeout ):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append('''passed''' )
        except TimeoutException:
            result.append('''timed out''' )
        except BaseException as e:
            result.append(f"failed: {e}" )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds ):
    def signal_handler(signum , frame ):
        raise TimeoutException('''Timed out!''' )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class TimeoutException(Exception ):
    pass
class WriteOnlyStringIO(io.StringIO ):
    """StringIO that throws an exception when it's read from."""
    def read( self , *args , **kwargs ):
        raise OSError
    def readline( self , *args , **kwargs ):
        raise OSError
    def readlines( self , *args , **kwargs ):
        raise OSError
    def readable( self , *args , **kwargs ):
        return False
class redirect_stdin(contextlib._RedirectStream ):  # type: ignore
    _stream = '''stdin'''
@contextlib.contextmanager
def chdir(root ):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard(maximum_memory_bytes=None ):
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ['''OMP_NUM_THREADS'''] = '''1'''
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
    import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
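# Illustrative driver (a sketch, not part of the original file): evaluate one
# candidate program with a 3-second budget; task_id and completion_id are
# arbitrary labels here. reliability_guard() deliberately disables much of
# os/shutil inside the worker process, so it only runs in the child.
if __name__ == "__main__":
    print(check_correctness("assert 1 + 1 == 2" , 3.0 , "demo/0" , 0 ) )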
| 716
|
def solution(length: int = 50 ) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
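# Sanity check from the Project Euler 114 statement: a row of length 7 admits
# exactly 17 arrangements, so solution(7) should return 17.
assert solution(7 ) == 17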
if __name__ == "__main__":
print(F"{solution() = }")
| 212
| 0
|
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name , val , spaces=0 ):
    '''simple docstring'''
    if name is None:
        msg = None
    else:
        fmt = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val , dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2 )
    elif isinstance(val , torch.Tensor ):
        print(msg , """:""" , val.size() )
    else:
        print(msg , """:""" , val )
def fix_query_key_value_ordering(param , checkpoint_version , num_splits , num_heads , hidden_size ):
    '''simple docstring'''
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 2 )
        param = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 1 ).contiguous()
    param = param.view(*input_shape )
    return param
def convert_megatron_checkpoint(args , input_state_dict , config ):
    '''simple docstring'''
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("""args""" , None )
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["""checkpoint_version"""]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["""model"""]
    # The language model.
    lm = model["""language_model"""]
    # The embeddings.
    embeddings = lm["""embedding"""]
    # The word embeddings.
    word_embeddings = embeddings["""word_embeddings"""]["""weight"""]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["""transformer.wte.weight"""] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["""position_embeddings"""]["""weight"""]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0 )
    if n_positions != config.n_positions:
        raise ValueError(
            f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
    # Store the position embeddings.
    output_state_dict["""transformer.wpe.weight"""] = pos_embeddings
    # The transformer.
    transformer = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
    # The regex to extract layer names.
    layer_re = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        """attention.dense""": """.attn.c_proj.""",
        """self_attention.dense""": """.attn.c_proj.""",
        """mlp.dense_h_to_4h""": """.mlp.c_fc.""",
        """mlp.dense_4h_to_h""": """.mlp.c_proj.""",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key )
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1 ) )
        # The name of the operation.
        op_name = m.group(2 )
        # Is it a weight or a bias?
        weight_or_bias = m.group(3 )
        # The name of the layer.
        layer_name = f"""transformer.h.{layer_idx}"""
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("""layernorm""" ):
            ln_name = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
                1 , 1 , n_positions , n_positions )
            output_state_dict[layer_name + """.attn.bias"""] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1E4 , dtype=torch.float16 )
            output_state_dict[layer_name + """.attn.masked_bias"""] = masked_bias
            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0 , 1 ).contiguous()
            # Store.
            output_state_dict[layer_name + """.attn.c_attn.weight"""] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Store. No change of shape.
            output_state_dict[layer_name + """.attn.c_attn.bias"""] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + """weight"""] = val.transpose(0 , 1 )
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + """bias"""] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["""transformer.ln_f.weight"""] = transformer["""final_layernorm.weight"""]
    output_state_dict["""transformer.ln_f.bias"""] = transformer["""final_layernorm.bias"""]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["""lm_head.weight"""] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
    parser.add_argument(
        """path_to_checkpoint""" , type=str , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
    parser.add_argument(
        """--config_file""" , default="""""" , type=str , help="""An optional config json file describing the pre-trained model.""" , )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint )
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
    if args.path_to_checkpoint.endswith(""".zip""" ):
        with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
            with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict , map_location="""cpu""" )
    else:
        input_state_dict = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
    ds_args = input_state_dict.get("""args""" , None )
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = """gelu_fast"""
            elif ds_args.openai_gelu:
                activation_function = """gelu_new"""
            else:
                activation_function = """gelu"""
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = """gelu_new"""
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=activation_function , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , )
    else:
        config = GPT2Config.from_json_file(args.config_file )
    config.architectures = ["""GPT2LMHeadModel"""]
    # Convert.
    print("""Converting""" )
    output_state_dict = convert_megatron_checkpoint(args , input_state_dict , config )
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None , output_state_dict )
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = """gpt2"""
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" )
    else:
        tokenizer_model_name = """gpt2"""
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name )
    tokenizer_class = type(tokenizer ).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("""Saving config""" )
    config.save_pretrained(basename )
    # Save tokenizer based on args
    print(f"""Adding {tokenizer_class} tokenizer files""" )
    tokenizer.save_pretrained(basename )
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename , """pytorch_model.bin""" )
    print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
    torch.save(output_state_dict , output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
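# Typical invocation of the converter above (the checkpoint path is an
# illustrative assumption, not from the original file):
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       checkpoints/megatron_gpt2_345m.zip
# This writes config.json, the tokenizer files and pytorch_model.bin into the
# directory that contains the input checkpoint.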
| 306
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'blip_text_model'
    def __init__( self , vocab_size=3_0524 , hidden_size=768 , encoder_hidden_size=768 , intermediate_size=3072 , projection_dim=768 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=512 , hidden_act="gelu" , layer_norm_eps=1e-12 , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , bos_token_id=3_0522 , eos_token_id=2 , pad_token_id=0 , sep_token_id=102 , is_decoder=True , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get('model_type' ) == "blip":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipVisionConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'blip_vision_model'
    def __init__( self , hidden_size=768 , intermediate_size=3072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , image_size=384 , patch_size=16 , hidden_act="gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=1e-10 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get('model_type' ) == "blip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'blip'
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , image_text_hidden_size=256 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
        self.text_config = BlipTextConfig(**text_config )
        self.vision_config = BlipVisionConfig(**vision_config )
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        """simple docstring"""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
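# Illustrative usage sketch (not part of the upstream file): building the
# composite config from explicit sub-configs; the text encoder's
# encoder_hidden_size is tied to the vision hidden size in __init__.
if __name__ == "__main__":
    text_cfg = BlipTextConfig(vocab_size=3_0524 )
    vision_cfg = BlipVisionConfig(image_size=384 )
    cfg = BlipConfig.from_text_vision_configs(text_cfg , vision_cfg )
    print(cfg.model_type , cfg.text_config.encoder_hidden_size )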
| 394
| 0
|
'''simple docstring'''
import math
def malus_law(initial_intensity: float , angle: float ) -> float:
    '''simple docstring'''
    if initial_intensity < 0:
        # handling of negative values of initial intensity
        raise ValueError('The value of intensity cannot be negative' )
    if angle < 0 or angle > 360:
        # handling of values out of allowed range
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
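# Illustrative check (not part of the original file). Malus's law is
# I = I0 * cos^2(theta); for I0 = 100 and theta = 60 degrees the result is
# approximately 25.0, since cos(60 degrees)^2 = 0.25:
# malus_law(100.0, 60.0) -> ~25.0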
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 320
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase_ = logging.get_logger(__name__)
class MobileNetV2ImageProcessor(BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize: bool = True , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 2_55 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 2_56}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: Optional[bool] = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes: List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
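# Illustrative usage sketch (not part of the upstream file): running the
# processor on a random image. With the defaults above the image is resized to
# shortest_edge=256 and then center-cropped to 224x224.
if __name__ == "__main__":
    processor = MobileNetV2ImageProcessor()
    image = (np.random.rand(480 , 640 , 3 ) * 255 ).astype("uint8" )
    batch = processor.preprocess(image , return_tensors=None )
    print(np.array(batch['pixel_values'] ).shape )  # expected: (1, 3, 224, 224)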
| 320
| 1
|
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"""
_DESCRIPTION = """\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"""
_KWARGS_DESCRIPTION = """\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"""
def simple_accuracy(preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels , f1_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["""prediction"""]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average="""macro""" )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric ):
    def _info( self ):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(self._get_feature_types() ),codebase_urls=[],reference_urls=[],format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None,)
    def _get_feature_types( self ):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute( self , predictions , references ):
        '''simple docstring'''
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg="""macro""" )
        elif self.config_name == "record":
            dataset = [
                {
                    """qas""": [
                        {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
                        for ref in references
                    ]
                }
            ]
            record_predictions = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
            return evaluate_record(dataset , record_predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 689
|
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int , k: int ) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state(increment: int , total_number: int , level: int , current_list: list[int] , total_list: list[list[int]] , ) -> None:
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state(total_list: list[list[int]] ) -> None:
    for i in total_list:
        print(*i )
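# Illustrative expected output (not part of the original file): for n=4, k=2
# generate_all_combinations returns the six combinations
# [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4].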
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 155
| 0
|
import numpy as np
SQUARE = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class _snake_case :
def __init__( self):
'''simple docstring'''
lowercase__ : Any = np.array(SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Any = np.where(letter == self.SQUARE)
lowercase__ : Optional[Any] = np.concatenate([indexa + 1, indexa + 1])
return indexes
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : str = message.lower()
lowercase__ : Optional[int] = message.replace(""" """ , """""")
lowercase__ : str = message.replace("""j""" , """i""")
lowercase__ : str = np.empty((2, len(SCREAMING_SNAKE_CASE_)))
for letter_index in range(len(SCREAMING_SNAKE_CASE_)):
lowercase__ : List[str] = self.letter_to_numbers(message[letter_index])
lowercase__ : List[Any] = numbers[0]
lowercase__ : str = numbers[1]
lowercase__ : Dict = first_step.reshape(2 * len(SCREAMING_SNAKE_CASE_))
lowercase__ : List[Any] = """"""
for numbers_index in range(len(SCREAMING_SNAKE_CASE_)):
lowercase__ : List[Any] = int(second_step[numbers_index * 2])
lowercase__ : Dict = int(second_step[(numbers_index * 2) + 1])
lowercase__ : int = self.numbers_to_letter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = encoded_message + letter
return encoded_message
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[int] = message.lower()
message.replace(""" """ , """""")
lowercase__ : Dict = np.empty(2 * len(SCREAMING_SNAKE_CASE_))
for letter_index in range(len(SCREAMING_SNAKE_CASE_)):
lowercase__ : Optional[int] = self.letter_to_numbers(message[letter_index])
lowercase__ : int = numbers[0]
lowercase__ : List[str] = numbers[1]
lowercase__ : Optional[Any] = first_step.reshape((2, len(SCREAMING_SNAKE_CASE_)))
lowercase__ : List[Any] = """"""
for numbers_index in range(len(SCREAMING_SNAKE_CASE_)):
lowercase__ : Union[str, Any] = int(second_step[0, numbers_index])
lowercase__ : Optional[int] = int(second_step[1, numbers_index])
lowercase__ : str = self.numbers_to_letter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = decoded_message + letter
return decoded_message
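# Minimal usage sketch (added for illustration; this demo block is an assumption, not
# part of the original file). The cipher round-trips any message made of letters from
# the square; "j" is folded into "i" during encoding.
if __name__ == "__main__":
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    print(encoded)                 # ciphertext built from the rearranged coordinates
    print(cipher.decode(encoded))  # prints "testmessage" again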
| 717
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg"]'
            )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64"""),
"query": datasets.Value("""int64"""),
},
"prediction_text": datasets.Value("""string"""),
},
"references": {
"idx": {
"passage": datasets.Value("""int64"""),
"query": datasets.Value("""int64"""),
},
"answers": datasets.Sequence(datasets.Value("""string""")),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64"""),
"paragraph": datasets.Value("""int64"""),
"question": datasets.Value("""int64"""),
},
"prediction": datasets.Value("""int64"""),
},
"references": datasets.Value("""int64"""),
}
else:
return {
"predictions": datasets.Value("""int64"""),
"references": datasets.Value("""int64"""),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg"]'
            )
| 495
| 0
|
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                # Response ratio = (waiting time + burst time) / burst time
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
UpperCamelCase : Optional[int] = 5
UpperCamelCase : str = ['A', 'B', 'C', 'D', 'E']
UpperCamelCase : Optional[int] = [1, 2, 3, 4, 5]
UpperCamelCase : Tuple = [1, 2, 3, 4, 5]
UpperCamelCase : str = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
UpperCamelCase : Dict = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(F'average waiting time : {mean(waiting_time):.5f}')
print(F'average turn around time : {mean(turn_around_time):.5f}')
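# Worked example (added for illustration): Highest Response Ratio Next dispatches the
# ready process with the largest (waiting_time + burst_time) / burst_time. If the
# current time were 6 and processes D (arrival 4, burst 4) and E (arrival 5, burst 5)
# were both waiting, the ratios would be:
#   D: ((6 - 4) + 4) / 4 = 1.5
#   E: ((6 - 5) + 5) / 5 = 1.2
# so D would run next. These numbers are illustrative, not printed by the demo above.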
| 50
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCamelCase : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
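# Illustrative invocations (added; the file name "release.py" is an assumption, adapt
# it to where this script lives in the repository):
#   python release.py                  # prepare a minor release (x.(y+1).0)
#   python release.py --patch          # prepare a patch release (x.y.(z+1))
#   python release.py --post_release   # move back to a .dev0 version afterwards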
| 50
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxBlenderbotSmallHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48)
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float64).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float64).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )
                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 704
|
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 8
| 0
|
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = sys.argv[1]
SCREAMING_SNAKE_CASE__ : str = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
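# Illustrative input shape (added): the script expects a JSON file mapping benchmark
# names to metric dictionaries, each carrying a "new" value plus optional "old" and
# "diff" values, e.g.
#   {"benchmarks/bench_array.py": {"read_time": {"new": 0.12, "old": 0.15, "diff": -0.03}}}
# Each benchmark is rendered as a small markdown table wrapped in a <details> block.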
| 79
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
                [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def __UpperCAmelCase ( self ):
pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)
        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 79
| 1
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0
        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 709
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
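# Minimal usage sketch (added for illustration; the checkpoint and file names are
# assumptions — any CLIP-style zero-shot image-classification model should work):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
#   # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}], sorted by score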
| 13
| 0
|
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
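# Illustrative invocation (added; repository names and the script file name are
# assumptions). The flags below are the fields this script reads from
# PretokenizationArguments:
#   python pretokenizing.py \
#       --tokenizer_dir codeparrot/codeparrot \
#       --dataset_name codeparrot/codeparrot-clean-train \
#       --tokenized_data_repo my-org/tokenized-codeparrot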
| 18
|
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
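
if __name__ == "__main__":
    # Standalone inference sketch outside the test harness (the checkpoint name
    # matches the integration test above; the image URL is an illustrative assumption):
    import requests

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    img = Image.open(requests.get(url, stream=True).raw)
    processor = MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    with torch.no_grad():
        logits = model(**processor(images=img, return_tensors="pt")).logits
    print(model.config.id2label[logits.argmax(-1).item()])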
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads`"
                " because of a naming issue as described in"
                " https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing"
                " `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards
        # breaking which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
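
if __name__ == "__main__":
    # Init/forward sketch, not verified end-to-end: channel widths, shapes and the
    # PRNG seed are illustrative assumptions; real weights would come from
    # FlaxUNet2DConditionModel.from_pretrained.
    unet = FlaxUNet2DConditionModel(sample_size=32, block_out_channels=(32, 64, 64, 64))
    params = unet.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)
    timesteps = jnp.array([10], dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)
    out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
    print(out.sample.shape)  # expected: (1, 4, 32, 32)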
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
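
if __name__ == "__main__":
    # Quick sketch (defaults above): the conv feature extractor downsamples raw
    # audio by the product of the strides, so one encoder frame covers
    # 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 samples, i.e. 20 ms at 16 kHz.
    config = UniSpeechSatConfig()
    print(config.inputs_to_logits_ratio)  # 320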
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
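
if __name__ == "__main__":
    # Illustrative round trip (values shown are the defaults above):
    config = NezhaConfig()
    print(config.max_relative_position)  # 64 -- NEZHA uses relative position encodings
    print(config.vocab_size)             # 21128, a Chinese BERT-style vocabulary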
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1_002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase_ : Tuple = {'input_ids': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `lowercase_` holds the expected encoding built on the line above
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_,
            model_name='xlm-roberta-base',
            revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3',
        )
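
if __name__ == "__main__":
    # Hedged sketch of the fairseq id offset exercised above (requires network
    # access to download the real checkpoint):
    tok = XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
    print(tok.fairseq_offset)          # 1: raw SentencePiece ids are shifted by one
    print(tok.encode('Hello World!'))  # [0, 35378, 6661, 38, 2], matching the test above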
from __future__ import annotations


def min_path_cost(matrix: list[list[int]]) -> int:
    """Return the minimum cost of a path from the top-left to the bottom-right
    corner of `matrix`, moving only right or down. The matrix is mutated in
    place and used as the DP table.

    >>> min_path_cost([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
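
    # Demo (illustrative grid, added for clarity): the function mutates its
    # argument in place, so pass a copy if the original grid is needed afterwards.
    import copy

    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    print(min_path_cost(copy.deepcopy(grid)))  # 7  (path 1 -> 3 -> 1 -> 1 -> 1)
    print(grid)  # unchanged: [[1, 3, 1], [1, 5, 1], [4, 2, 1]]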
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
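
if __name__ == "__main__":
    # Illustrative sketch (defaults above): a 224x224 image with 16x16 patches
    # yields a 14x14 grid, i.e. 196 patch tokens plus the [CLS] token.
    config = ViTMSNConfig()
    print((config.image_size // config.patch_size) ** 2)  # 196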
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/rembert''': 256,
}
class UpperCAmelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text , sample=False ):
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
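# A minimal round-trip sketch for the tokenizer above (illustrative only; the
# SentencePiece model path is a placeholder, not a file shipped with this code):
#
#   tokenizer = UpperCAmelCase(vocab_file="sentencepiece.model")
#   pieces = tokenizer._tokenize("Hello world")
#   ids = tokenizer.build_inputs_with_special_tokens(
#       [tokenizer._convert_token_to_id(p) for p in pieces]
#   )  # wraps the sequence as [CLS] ... [SEP]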
| 717
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer( BaseTransformer ):
    mode = "sequence-classification"
    def __init__( self , hparams ):
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode )
    def forward( self , **inputs ):
        return self.model(**inputs )
    def training_step( self , batch , batch_idx ):
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['''token_type_ids'''] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        outputs = self(**inputs )
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]['''scheduler''']
        tensorboard_logs = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data( self ):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''' , cached_features_file )
            else:
                logger.info('''Creating features from dataset file at %s''' , args.data_dir )
                examples = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == '''dev'''
                    else processor.get_train_examples(args.data_dir )
                )
                features = convert_examples_to_features(
                    examples , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info('''Saving features into cached file %s''' , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self , mode: str , batch_size: int , shuffle: bool = False ):
        mode = '''dev''' if mode == '''test''' else mode
        cached_features_file = self._feature_file(mode )
        logger.info('''Loading features from cached file %s''' , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_labels ) , batch_size=batch_size , shuffle=shuffle , )
    def validation_step( self , batch , batch_idx ):
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['''token_type_ids'''] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        outputs = self(**inputs )
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['''labels'''].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs ):
        val_loss_mean = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
        preds = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds )
        out_label_ids = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        results = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids )}
        ret = dict(results.items() )
        ret['''log'''] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs: list ):
        ret , preds , targets = self._eval_end(outputs )
        logs = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ):
        ret , predictions , targets = self._eval_end(outputs )
        logs = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser , root_dir ):
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            '''--max_seq_length''' , default=128 , type=int , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--task''' , default='''''' , type=str , required=True , help='''The GLUE task to run''' , )
        parser.add_argument(
            '''--gpus''' , default=0 , type=int , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
        parser.add_argument(
            '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            '''./results''' , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
    main()
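# Illustrative invocation (model/data flag names come from add_generic_args in
# lightning_base, so treat them as assumptions about that helper):
#
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results \
#       --do_train --do_predict --gpus 1 --max_seq_length 128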
| 346
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["""LayoutLMv2FeatureExtractor"""]
    _import_structure["image_processing_layoutlmv2"] = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
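# With the _LazyModule wiring above, importing a name such as LayoutLMv2Model
# from this package resolves lazily: the heavy torch-backed module is only
# imported on first attribute access, and the try/except blocks above simply
# omit symbols whose optional backend (tokenizers, vision, torch) is missing.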
| 512
|
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
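# The forward-compatible import spelled out by the deprecation message above:
# from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput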
| 512
| 1
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def next_number(number: int ) -> int:
    '''simple docstring'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
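# Worked example of the two chains (illustrative): 44 -> 32 -> 13 -> 10 -> 1
# ends at 1, while 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89
# closes the 89-cycle, so chain(85) evaluates to False below.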
CHAINS = [None] * 10_00_00_00
CHAINS[0] = True  # the chain starting at 1
CHAINS[57] = False  # the chain through 58, which ends at 89
def chain(number: int ) -> bool:
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 1000_0000 ) -> int:
    '''simple docstring'''
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(F"""{solution() = }""")
| 720
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id , path , revision ):
    '''simple docstring'''
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
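# For instance, hf_hub_url(repo_id="org-name/dataset-name",
# path="filename with blanks.csv", revision="v2") is asserted to yield
# https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv
# since urllib's quote() percent-encodes the space.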
| 676
| 0
|
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _snake_case (PipelineTesterMixin , unittest.TestCase):
__A : Union[str, Any] =VideoToVideoSDPipeline
__A : Tuple =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
__A : Union[str, Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
__A : str =PipelineTesterMixin.required_optional_params - {"latents"}
__A : Dict =False
# No `output_type`.
__A : Optional[int] =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
])
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
        scheduler = DDIMScheduler(
            beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=False ,set_alpha_to_one=False ,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=1_28 ,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="gelu" ,projection_dim=5_12 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs( self ,device ,seed=0 ):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ,expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _snake_case (unittest.TestCase):
    def test_two_step_model( self ):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" ,torch_dtype=torch.floataa )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 10_24, 5_76) ,generator=generator )
        video = video.to("cuda" )
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt ,video=video ,generator=generator ,num_inference_steps=3 ,output_type="pt" ).frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
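# The slow test above is checkpoint- and seed-specific: it loads
# "cerspense/zeroscope_v2_XL" in float16 with CPU offload and compares a slice
# of the generated frames against values recorded for seed 0 on that setup.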
| 71
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( a_ , unittest.TestCase ):
_A : List[str] = XLMRobertaTokenizer
_A : List[str] = XLMRobertaTokenizerFast
_A : Optional[Any] = True
_A : List[str] = True
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 10_02 )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(snake_case__ )
UpperCAmelCase = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(snake_case__ )
UpperCAmelCase = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
UpperCAmelCase = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(snake_case__ )
UpperCAmelCase = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
UpperCAmelCase = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(snake_case__ )
UpperCAmelCase = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@cached_property
    def big_tokenizer( self ):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
        symbols = """Hello World!"""
        original_tokenizer_encodings = [0, 3_53_78, 66_61, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
        symbols = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = {"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 673
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
    def test_xlm_roberta_base( self ):
        '''simple docstring'''
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 7_68) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
    @slow
    def test_xlm_roberta_large( self ):
        '''simple docstring'''
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 10_24) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
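# Both integration checks above compare only output[:, :, -1] (the last
# embedding dimension at every position) against reference values extracted
# with fairseq's xlmr models, using an absolute tolerance of 1e-3.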
| 292
|
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix , row: int , column: int , n: int ) -> bool:
    '''simple docstring'''
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
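# Example of the box arithmetic above (illustrative): for (row, column) = (4, 7),
# row - row % 3 == 3 and column - column % 3 == 6, so the inner loops scan the
# 3x3 box whose top-left corner is cell (3, 6).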
def find_empty_location(grid: Matrix ):
    '''simple docstring'''
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix ):
    '''simple docstring'''
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix ):
    '''simple docstring'''
    for row in grid:
        for cell in row:
            print(cell , end=" " )
        print()
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("""\nExample grid:\n""" + """=""" * 2_0)
        print_solution(example_grid)
        print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("""Cannot find a solution.""")
| 292
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    '''simple docstring'''
    # Initialise PyTorch model from the JSON configuration
    config = XLNetConfig.from_json_file(bert_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else """"""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
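# Illustrative invocation (the script name and all paths are placeholders):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sts-b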
| 46
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model ):
    """simple docstring"""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer( nn.Module ):
    """simple docstring"""

    def __init__( self , module , rank ):
        '''simple docstring'''
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )

    def forward( self , input , *args , **kwargs ):
        '''simple docstring'''
        return self.module(input , *args , **kwargs ) + self.adapter(input )
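# The adapter above is a LoRA-style low-rank bottleneck: a Linear down to
# `rank` dimensions followed by a Linear back up, initialized so the up
# projection starts at zero, with the whole branch added residually to the
# wrapped module's output in forward().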
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
a_ = "bigscience/bloom-1b7"
# Constant values
a_ = 2.109659552692574
a_ = "Hello my name is"
a_ = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
a_ = 10
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = AutoTokenizer.from_pretrained(self.model_name )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
a_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
a_ : Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
a_ : Any = config.to_dict()
a_ : Dict = config.to_diff_dict()
a_ : Dict = config.to_json_string()
def _lowerCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
a_ : List[str] = self.model_fpaa.get_memory_footprint()
a_ : Optional[int] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
a_ : Union[str, Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _lowerCAmelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" )
a_ : str = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = BitsAndBytesConfig()
a_ : Union[str, Any] = True
a_ : Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
a_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" )
a_ : Tuple = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def _lowerCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
a_ : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def _lowerCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
a_ : List[Any] = self.tokenizer(self.input_text , return_tensors="""pt""" )
a_ : Union[str, Any] = self.model_fpaa.to(torch.floataa )
a_ : Union[str, Any] = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
a_ : int = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
a_ : Optional[Any] = self.model_fpaa.half()
# Check this does not throw an error
a_ : Dict = self.model_fpaa.float()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _lowerCAmelCase ( cls ):
'''simple docstring'''
a_ : List[str] = """t5-small"""
a_ : Any = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
a_ : Optional[Any] = AutoTokenizer.from_pretrained(cls.model_name )
a_ : int = """Translate in German: Hello, my dog is cute"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
a_ : Dict = TaForConditionalGeneration._keep_in_fpaa_modules
a_ : str = None
# test with `t5-small`
a_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
a_ : Optional[int] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : str = model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
a_ : Optional[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
a_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : Any = model.generate(**lowerCAmelCase_ )
a_ : List[str] = modules
def _lowerCAmelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
a_ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
a_ : Tuple = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : Union[str, Any] = model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
a_ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
a_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : Tuple = model.generate(**lowerCAmelCase_ )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
a_ : Dict = """bigscience/bloom-560m"""
a_ : Any = """t5-small"""
# Different types of model
a_ : Tuple = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
a_ : Any = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
a_ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
a_ : str = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def _lowerCAmelCase ( self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
a_ : List[Any] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
a_ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
a_ : Dict = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = """facebook/opt-350m"""
super().setUp()
def _lowerCAmelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
a_ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
a_ : int = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
a_ : Any = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
a_ : List[str] = LoRALayer(module.q_proj , rank=16 )
a_ : Union[str, Any] = LoRALayer(module.k_proj , rank=16 )
a_ : Optional[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
a_ : Tuple = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
a_ : List[str] = model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
a_ = "gpt2-xl"
a_ = 3.3191854854152187
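# EXPECTED_RELATIVE_DIFFERENCE above is the fp16-to-4bit memory footprint ratio
# asserted via get_memory_footprint(): roughly 2.11 for bigscience/bloom-1b7
# earlier in this file and roughly 3.32 for gpt2-xl here.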
| 577
| 0
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __A ( a_ : List[str] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Union[str, Any] , a_ : List[str] )-> Any:
'''simple docstring'''
with open(a_ ) as metadata_file:
SCREAMING_SNAKE_CASE : List[str] = json.load(a_ )
SCREAMING_SNAKE_CASE : Dict = LukeConfig(use_entity_aware_attention=a_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE : List[str] = torch.load(a_ , map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE : int = load_original_entity_vocab(a_ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE : Optional[Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE : Tuple = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE : Any = AddedToken('''<ent>''' , lstrip=a_ , rstrip=a_ )
SCREAMING_SNAKE_CASE : Any = AddedToken('''<ent2>''' , lstrip=a_ , rstrip=a_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(a_ )
with open(os.path.join(a_ , '''tokenizer_config.json''' ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE : Any = json.load(a_ )
SCREAMING_SNAKE_CASE : Dict = '''MLukeTokenizer'''
with open(os.path.join(a_ , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
with open(os.path.join(a_ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(a_ , a_ )
SCREAMING_SNAKE_CASE : Optional[int] = MLukeTokenizer.from_pretrained(a_ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE : Optional[int] = state_dict[bias_name]
SCREAMING_SNAKE_CASE : Dict = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE : Union[str, Any] = F"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE : str = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE : List[Any] = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE : int = state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE : str = state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE : str = LukeForMaskedLM(config=a_ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE : Optional[int] = state_dict[key]
else:
SCREAMING_SNAKE_CASE : str = state_dict[key]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = model.load_state_dict(a_ , strict=a_ )
if set(a_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(a_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE : Any = MLukeTokenizer.from_pretrained(a_ , task='''entity_classification''' )
SCREAMING_SNAKE_CASE : List[Any] = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE : int = (0, 9)
SCREAMING_SNAKE_CASE : int = tokenizer(a_ , entity_spans=[span] , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Any = model(**a_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 33, 7_68) )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE : Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , a_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE : Any = MLukeTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE : List[Any] = (24, 30)
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(a_ , entity_spans=[span] , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : int = model(**a_ )
SCREAMING_SNAKE_CASE : Dict = encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE : str = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(a_ )
SCREAMING_SNAKE_CASE : Any = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE : int = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
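# (added note) Both heads are checked against the same masked sentence: the
# word head must fill <mask> with "Japan", and the entity head must resolve the
# span to the multilingual entity "en:Japan".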
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(a_ ) )
model.save_pretrained(a_ )
def __A ( a_ : Tuple )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
SCREAMING_SNAKE_CASE : List[Any] = [json.loads(a_ ) for line in open(a_ )]
SCREAMING_SNAKE_CASE : str = {}
for entry in data:
SCREAMING_SNAKE_CASE : Optional[int] = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE : int = entity_id
break
SCREAMING_SNAKE_CASE : List[str] = F"{language}:{entity_name}"
SCREAMING_SNAKE_CASE : Dict = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ : List[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 18
|
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
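# (added note) Residuals are merged additively: the first controlnet seeds the
# running totals, and every later controlnet's down-block and mid-block
# residuals are summed element-wise into them, so the UNet receives a single
# combined set of control signals.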
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Callable = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = model_path_to_save + f"_{idx}"
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Optional[Union[str, os.PathLike]] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Dict = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
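# (added note) from_pretrained undoes the save loop above: it loads from the
# given path, then keeps appending `_{idx}` to it (`..._1`, `..._2`, ...) until
# no such directory exists, and wraps the collected nets in a new instance.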
| 18
| 1
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def a_ ( __lowerCAmelCase ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class SCREAMING_SNAKE_CASE__ (nn.Module ):
def __init__( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
super().__init__()
lowerCAmelCase__ = module
lowerCAmelCase__ = nn.Sequential(
nn.Linear(module.in_features , __lowerCamelCase , bias=__lowerCamelCase ) , nn.Linear(__lowerCamelCase , module.out_features , bias=__lowerCamelCase ) , )
lowerCAmelCase__ = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__lowerCamelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def A__ ( self : Optional[Any] , __lowerCamelCase : Any , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ):
"""simple docstring"""
return self.module(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) + self.adapter(__lowerCamelCase )
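# (added note) Usage sketch: wrapping a frozen nn.Linear with the adapter above
# turns its output into module(x) + adapter(x), where the adapter is a low-rank
# bottleneck (in_features -> rank -> out_features). Because the second adapter
# matrix is zero-initialised, the wrapped layer starts out numerically
# identical to the original one.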
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ (unittest.TestCase ):
lowercase_ : Optional[Any] = "bigscience/bloom-1b7"
# Constant values
lowercase_ : Union[str, Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowercase_ : List[str] = "Hello my name is"
lowercase_ : str = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowercase_ : Optional[int] = 10
def A__ ( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = AutoTokenizer.from_pretrained(self.model_name )
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
def A__ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# Models and tokenizer
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase , device_map='''auto''' )
def A__ ( self : str ):
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def A__ ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_abit.config
self.assertTrue(hasattr(__lowerCamelCase , '''quantization_config''' ) )
lowerCAmelCase__ = config.to_dict()
lowerCAmelCase__ = config.to_diff_dict()
lowerCAmelCase__ = config.to_json_string()
def A__ ( self : Optional[Any] ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
lowerCAmelCase__ = self.model_fpaa.get_memory_footprint()
lowerCAmelCase__ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCAmelCase__ = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
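# (added note, rough accounting) The ~2.11x ratio checked above is fp16 versus
# 4-bit memory: the quantised linear weights shrink about 4x, but embeddings
# and modules kept in higher precision pull the overall footprint ratio down.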
def A__ ( self : Union[str, Any] ):
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__lowerCamelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
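# (added note) bitsandbytes packs two 4-bit values into each byte, which is why
# the quantised Linear weights report dtype uint8 rather than a float dtype.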
def A__ ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCAmelCase__ = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__lowerCamelCase ) , self.EXPECTED_OUTPUTS )
def A__ ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = BitsAndBytesConfig()
lowerCAmelCase__ = True
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__lowerCamelCase , device_map='''auto''' )
lowerCAmelCase__ = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCAmelCase__ = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__lowerCamelCase ) , self.EXPECTED_OUTPUTS )
def A__ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(__lowerCamelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__lowerCamelCase )
def A__ ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = BitsAndBytesConfig()
with self.assertRaises(__lowerCamelCase ):
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__lowerCamelCase , load_in_abit=__lowerCamelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def A__ ( self : str ):
"""simple docstring"""
with self.assertRaises(__lowerCamelCase ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(__lowerCamelCase ):
# Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(__lowerCamelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(__lowerCamelCase ):
# Tries with a cast to float
self.model_abit.float()
with self.assertRaises(__lowerCamelCase ):
# Tries with a cast to half precision
self.model_abit.half()
# Test if we did not break anything
lowerCAmelCase__ = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCAmelCase__ = self.model_fpaa.to(torch.floataa )
lowerCAmelCase__ = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
lowerCAmelCase__ = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCAmelCase__ = self.model_fpaa.half()
# Check this does not throw an error
lowerCAmelCase__ = self.model_fpaa.float()
def A__ ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__lowerCamelCase , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ (unittest.TestCase ):
@classmethod
def A__ ( cls : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = "t5-small"
lowerCAmelCase__ = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
lowerCAmelCase__ = AutoTokenizer.from_pretrained(cls.model_name )
lowerCAmelCase__ = "Translate in German: Hello, my dog is cute"
def A__ ( self : Union[str, Any] ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def A__ ( self : Dict ):
"""simple docstring"""
from transformers import TaForConditionalGeneration
lowerCAmelCase__ = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCAmelCase__ = None
# test with `t5-small`
lowerCAmelCase__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase , device_map='''auto''' )
lowerCAmelCase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCAmelCase__ = model.generate(**__lowerCamelCase )
# test with `flan-t5-small`
lowerCAmelCase__ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__lowerCamelCase , device_map='''auto''' )
lowerCAmelCase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCAmelCase__ = model.generate(**__lowerCamelCase )
lowerCAmelCase__ = modules
def A__ ( self : Tuple ):
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCAmelCase__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCAmelCase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCAmelCase__ = model.generate(**__lowerCamelCase )
# test with `flan-t5-small`
lowerCAmelCase__ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__lowerCamelCase , device_map='''auto''' )
lowerCAmelCase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCAmelCase__ = model.generate(**__lowerCamelCase )
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
def A__ ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# model_name
lowerCAmelCase__ = "bigscience/bloom-560m"
lowerCAmelCase__ = "t5-small"
# Different types of model
lowerCAmelCase__ = AutoModel.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase , device_map='''auto''' )
# Sequence classification model
lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__lowerCamelCase , device_map='''auto''' )
# CausalLM model
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase , device_map='''auto''' )
# Seq2seq model
lowerCAmelCase__ = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__lowerCamelCase , device_map='''auto''' )
def A__ ( self : int ):
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def A__ ( self : Any ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
def A__ ( self : List[Any] ):
"""simple docstring"""
super().setUp()
def A__ ( self : str ):
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def A__ ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCAmelCase__ = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
def A__ ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
def A__ ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__lowerCamelCase , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCAmelCase__ = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCAmelCase__ = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__lowerCamelCase ) , self.EXPECTED_OUTPUTS )
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
def A__ ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = "facebook/opt-350m"
super().setUp()
def A__ ( self : Any ):
"""simple docstring"""
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCAmelCase__ = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCAmelCase__ = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__lowerCamelCase ) ):
lowerCAmelCase__ = LoRALayer(module.q_proj , rank=16 )
lowerCAmelCase__ = LoRALayer(module.k_proj , rank=16 )
lowerCAmelCase__ = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
lowerCAmelCase__ = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCAmelCase__ = model.forward(**__lowerCamelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__lowerCamelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
lowercase_ : List[str] = "gpt2-xl"
lowercase_ : Optional[Any] = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 615
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a : List[Any] = logging.get_logger(__name__)
a : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a : int = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a : Any = {
"allenai/led-base-16384": 1_63_84,
}
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Tuple = LEDTokenizer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , snake_case=True , **snake_case , ):
'''simple docstring'''
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(snake_case , pre_tok_state.pop("type" ) )
UpperCAmelCase : Any = add_prefix_space
UpperCAmelCase : str = pre_tok_class(**snake_case )
UpperCAmelCase : int = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase : Dict = "post_processor"
UpperCAmelCase : Dict = getattr(self.backend_tokenizer , snake_case , snake_case )
if tokenizer_component_instance:
UpperCAmelCase : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase : int = tuple(state["sep"] )
if "cls" in state:
UpperCAmelCase : Union[str, Any] = tuple(state["cls"] )
UpperCAmelCase : Tuple = False
if state.get("add_prefix_space" , snake_case ) != add_prefix_space:
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Optional[int] = True
if state.get("trim_offsets" , snake_case ) != trim_offsets:
UpperCAmelCase : Tuple = trim_offsets
UpperCAmelCase : List[str] = True
if changes_to_apply:
UpperCAmelCase : Optional[Any] = getattr(snake_case , state.pop("type" ) )
UpperCAmelCase : Tuple = component_class(**snake_case )
setattr(self.backend_tokenizer , snake_case , snake_case )
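# (added note) The block above rebuilds the serialized post-processor only when
# its stored add_prefix_space / trim_offsets disagree with the requested
# values, leaving an already-consistent tokenizer.json untouched.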
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def A_ ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value
UpperCAmelCase : Optional[Any] = value
def A_ ( self , *snake_case , **snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = kwargs.get("is_split_into_words" , snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case , **snake_case )
def A_ ( self , *snake_case , **snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = kwargs.get("is_split_into_words" , snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case , **snake_case )
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : str = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
def A_ ( self , snake_case , snake_case=None ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A_ ( self , snake_case , snake_case = None , snake_case = PaddingStrategy.DO_NOT_PAD , snake_case = None , snake_case = None , ):
'''simple docstring'''
UpperCAmelCase : int = super()._pad(
encoded_inputs=snake_case , max_length=snake_case , padding_strategy=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , )
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase : int = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
UpperCAmelCase : Optional[int] = len(encoded_inputs["global_attention_mask"] ) != len(snake_case )
if needs_to_be_padded:
UpperCAmelCase : Tuple = len(snake_case ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase : List[str] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase : Any = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
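# (added note) Padded positions cannot reuse 0 because 0 already means "local
# attention" in LED's global_attention_mask, so -1 is used as a distinct
# padding value on whichever side matches self.padding_side.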
| 679
| 0
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
a = False
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Union[str, Any]=32 ):
set_seed(0 )
_A = UNetaDModel(sample_size=_UpperCAmelCase , in_channels=3 , out_channels=3 )
_A = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def lowerCAmelCase_ ( self : Tuple ):
_A = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_A = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=_UpperCAmelCase , )
_A = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=_UpperCAmelCase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
_A = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(_UpperCAmelCase ) for _ in range(4 )]
_A = [torch.randn((4, 3, 32, 32) ).to(_UpperCAmelCase ) for _ in range(4 )]
_A = [torch.randint(0 , 1_000 , (4,) ).long().to(_UpperCAmelCase ) for _ in range(4 )]
# train with a DDPM scheduler
_A , _A = self.get_model_optimizer(resolution=32 )
model.train().to(_UpperCAmelCase )
for i in range(4 ):
optimizer.zero_grad()
_A = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_A = model(_UpperCAmelCase , timesteps[i] ).sample
_A = torch.nn.functional.mse_loss(_UpperCAmelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_A , _A = self.get_model_optimizer(resolution=32 )
model.train().to(_UpperCAmelCase )
for i in range(4 ):
optimizer.zero_grad()
_A = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_A = model(_UpperCAmelCase , timesteps[i] ).sample
_A = torch.nn.functional.mse_loss(_UpperCAmelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
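# (added note) With the same seed, batches and timesteps, DDPM's and DDIM's
# add_noise produce identical noisy samples, so the two training runs are
# expected to end in numerically matching results up to the 1e-5 tolerance.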
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
| 505
|
"""simple docstring"""
def _snake_case ( _snake_case : bytes ) -> str:
'''simple docstring'''
return "".join([hex(_snake_case )[2:].zfill(2 ).upper() for byte in list(_snake_case )] )
def _snake_case ( _snake_case : str ) -> bytes:
'''simple docstring'''
if (len(_snake_case ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(_snake_case ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_snake_case ) , 2 ) )
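# A minimal cross-check sketch (added for illustration, not part of the
# original script; the helper name below is ours): the two converters above
# are meant to mirror the standard library's base16 codec, so a round-trip
# through base64.b16encode and base64.b16decode should agree with them.
def _baseaa_stdlib_demo() -> None:
    import base64
    encoded = base64.b16encode(b'Hello World!')  # b'48656C6C6F20576F726C6421'
    assert encoded == b'48656C6C6F20576F726C6421'
    assert base64.b16decode(encoded) == b'Hello World!'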
if __name__ == "__main__":
import doctest
doctest.testmod()
| 505
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( a , a , unittest.TestCase ):
"""simple docstring"""
a_ : str =IFInpaintingPipeline
a_ : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
a_ : Dict =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ : str =PipelineTesterMixin.required_optional_params - {"latents"}
def _lowerCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return self._get_dummy_components()
def _lowerCAmelCase ( self : List[str] , _snake_case : Any , _snake_case : Optional[Any]=0 ) -> Optional[int]:
'''simple docstring'''
if str(_snake_case ).startswith('mps' ):
a__ = torch.manual_seed(_snake_case )
else:
a__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
a__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
a__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
a__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _lowerCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _lowerCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _lowerCAmelCase ( self : Any ) -> Any:
'''simple docstring'''
self._test_save_load_local()
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 232
|
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase__ ) -> bool:
'''simple docstring'''
a__ = 0
for ch in input_str:
a__ = ord(UpperCAmelCase__ )
a__ = pow(2,UpperCAmelCase__ )
# If the bit for the current character's unicode value is already on, the character repeats
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
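# Worked example of the bitmask trick above (added note): for "abc", ord('a')=97
# sets bit 97, ord('b')=98 sets bit 98 and ord('c')=99 sets bit 99, so no check
# fires and the function returns True; for "aba" the second 'a' finds bit 97
# already on (bitmap >> 97 & 1 == 1) and the function returns False.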
if __name__ == "__main__":
import doctest
doctest.testmod()
| 232
| 1
|
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_lowercase : Tuple = logging.get_logger(__name__)
def _lowerCAmelCase ( UpperCamelCase__: int , UpperCamelCase__: Any , UpperCamelCase__: Dict ) -> Dict:
"""simple docstring"""
A = UniSpeechSatForSequenceClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
A = downstream_dict["""projector.weight"""]
A = downstream_dict["""projector.bias"""]
A = downstream_dict["""model.post_net.linear.weight"""]
A = downstream_dict["""model.post_net.linear.bias"""]
return model
def _lowerCAmelCase ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] ) -> List[Any]:
"""simple docstring"""
A = UniSpeechSatForAudioFrameClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
A = downstream_dict["""model.linear.weight"""]
A = downstream_dict["""model.linear.bias"""]
return model
def _lowerCAmelCase ( UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: Any ) -> str:
"""simple docstring"""
A = UniSpeechSatForXVector.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
A = downstream_dict["""connector.weight"""]
A = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
A = downstream_dict[
f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
A = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
A = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
A = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
A = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
A = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
A = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def _lowerCAmelCase ( UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] ) -> int:
"""simple docstring"""
A = torch.load(UpperCamelCase__ , map_location="""cpu""" )
A = checkpoint["""Downstream"""]
A = UniSpeechSatConfig.from_pretrained(UpperCamelCase__ )
A = WavaVecaFeatureExtractor.from_pretrained(
UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , do_normalize=UpperCamelCase__ )
A = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
A = convert_classification(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
elif arch.endswith("""ForAudioFrameClassification""" ):
A = convert_diarization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
elif arch.endswith("""ForXVector""" ):
A = convert_xvector(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
A = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
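# (added note) The converter dispatches on the config's architecture suffix,
# copies only the downstream weights (projector / classifier / x-vector head)
# onto the pretrained backbone, and saves both the model and its feature
# extractor to the dump path.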
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
_lowercase : Tuple = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 546
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def _lowerCAmelCase ( UpperCamelCase__: Any ) -> Tuple:
"""simple docstring"""
def wrapper(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ):
A = timeit.default_timer()
A = func(*UpperCamelCase__ , **UpperCamelCase__ )
A = timeit.default_timer() - starttime
return delta
A = func.__name__
return wrapper
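# (added note) `wrapper` is a minimal timing decorator: it runs the wrapped
# function once and returns the elapsed wall-clock seconds instead of the
# function's own result, which is all the write-speed benchmark below needs.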
def _lowerCAmelCase ( UpperCamelCase__: dict , UpperCamelCase__: List[str]=1_00 , UpperCamelCase__: int=None ) -> Any:
"""simple docstring"""
A = []
A = seq_shapes or {}
for i in range(UpperCamelCase__ ):
A = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(UpperCamelCase__ , _ArrayXD ):
A = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(UpperCamelCase__ , datasets.Value ):
if v.dtype == "string":
A = """The small grey turtle was surprisingly fast when challenged."""
else:
A = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(UpperCamelCase__ , datasets.Sequence ):
while isinstance(UpperCamelCase__ , datasets.Sequence ):
A = v.feature
A = seq_shapes[k]
A = np.random.rand(*UpperCamelCase__ ).astype(v.dtype )
A = data
dummy_data.append((i, example) )
return dummy_data
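# (added note) Each dummy example mirrors its declared feature type: a random
# array for _ArrayXD, a fixed sentence or random integer for Value, and a
# random array of the configured shape for (possibly nested) Sequence features.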
def _lowerCAmelCase ( UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=1_00 , UpperCamelCase__: str=None ) -> Optional[int]:
"""simple docstring"""
A = generate_examples(UpperCamelCase__ , num_examples=UpperCamelCase__ , seq_shapes=UpperCamelCase__ )
with ArrowWriter(features=UpperCamelCase__ , path=UpperCamelCase__ ) as writer:
for key, record in dummy_data:
A = features.encode_example(UpperCamelCase__ )
writer.write(UpperCamelCase__ )
A , A = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
A = datasets.Dataset.from_file(filename=UpperCamelCase__ , info=datasets.DatasetInfo(features=UpperCamelCase__ ) )
return dataset
| 546
| 1
|
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class lowerCAmelCase ( __UpperCAmelCase):
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = tempfile.mkdtemp()
__snake_case = 8
# DPR tok
__snake_case = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__snake_case = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
__snake_case = os.path.join(lowerCamelCase__ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__snake_case = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__snake_case = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
__snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
__snake_case = os.path.join(lowerCamelCase__ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(lowerCamelCase__ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase__ ) )
def lowerCAmelCase ( self ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCAmelCase ( self ) -> DPRContextEncoderTokenizer:
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCAmelCase ( self ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.get_dummy_dataset()
__snake_case = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__snake_case = dataset
__snake_case = RagRetriever(
lowerCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.get_dummy_dataset()
__snake_case = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__snake_case = os.path.join(self.tmpdirname , '''dataset''' )
__snake_case = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__snake_case = RagRetriever(
lowerCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__snake_case = RagRetriever(
lowerCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowerCamelCase__ ) , )
return retriever
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__snake_case = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__snake_case = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__snake_case = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(lowerCamelCase__ , open(lowerCamelCase__ , '''wb''' ) )
__snake_case = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__snake_case = RagRetriever(
lowerCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = 1
__snake_case = self.get_dummy_canonical_hf_index_retriever()
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case , __snake_case , __snake_case = retriever.retrieve(lowerCamelCase__ , n_docs=lowerCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCamelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCamelCase__ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__snake_case = self.get_dummy_dataset()
retriever.save_pretrained(lowerCamelCase__ )
__snake_case = RagRetriever.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(lowerCamelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = 1
__snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase__ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case , __snake_case , __snake_case = retriever.retrieve(lowerCamelCase__ , n_docs=lowerCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCamelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCamelCase__ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCamelCase__ )
__snake_case = RagRetriever.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(lowerCamelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = 1
__snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase__ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case , __snake_case , __snake_case = retriever.retrieve(lowerCamelCase__ , n_docs=lowerCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCamelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCamelCase__ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCamelCase__ )
__snake_case = RagRetriever.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(lowerCamelCase__ , n_docs=1 )
self.assertTrue(out is not None )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = 1
__snake_case = self.get_dummy_legacy_index_retriever()
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case , __snake_case , __snake_case = retriever.retrieve(lowerCamelCase__ , n_docs=lowerCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCamelCase__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , lowerCamelCase__ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCamelCase__ )
__snake_case = RagRetriever.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(lowerCamelCase__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
import torch
__snake_case = 1
__snake_case = self.get_dummy_canonical_hf_index_retriever()
__snake_case = [[5, 7], [10, 11]]
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever(lowerCamelCase__ , lowerCamelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCamelCase__ )
__snake_case , __snake_case , __snake_case = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
__snake_case = retriever(
lowerCamelCase__ , lowerCamelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCamelCase__ , return_tensors='''pt''' , )
__snake_case , __snake_case , __snake_case , __snake_case = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.get_dpr_ctx_encoder_tokenizer()
__snake_case = 1
__snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase__ )
retriever.set_ctx_encoder_tokenizer(lowerCamelCase__ )
__snake_case = [[5, 7], [10, 11]]
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever(lowerCamelCase__ , lowerCamelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCamelCase__ )
self.assertEqual(
len(lowerCamelCase__ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , lowerCamelCase__ ) # check for doc-token-related keys in the dictionary.
| 24
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__A = "CompVis/stable-diffusion-v1-1"
__A = "CompVis/stable-diffusion-v1-2"
__A = "CompVis/stable-diffusion-v1-3"
__A = "CompVis/stable-diffusion-v1-4"
class A ( __UpperCAmelCase ):
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowercase__ = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
lowercase__ = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
lowercase__ = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
lowercase__ = StableDiffusionPipeline(
vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , requires_safety_checker=lowerCamelCase__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def A__ ( self ) -> Dict[str, Any]:
'''simple docstring'''
return {k: getattr(self , lowerCamelCase__ ) for k in self.config.keys() if not k.startswith("""_""" )}
def A__ ( self , lowerCamelCase__ = "auto" ) -> int:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase__ )
def A__ ( self ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase__ )
@torch.no_grad()
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = 512 , lowerCamelCase__ = 512 , lowerCamelCase__ = 50 , lowerCamelCase__ = 7.5 , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "pil" , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = 1 , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
@torch.no_grad()
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = 512 , lowerCamelCase__ = 512 , lowerCamelCase__ = 50 , lowerCamelCase__ = 7.5 , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "pil" , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = 1 , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
@torch.no_grad()
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = 512 , lowerCamelCase__ = 512 , lowerCamelCase__ = 50 , lowerCamelCase__ = 7.5 , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "pil" , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = 1 , **lowerCamelCase__ , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
@torch.no_grad()
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = 512 , lowerCamelCase__ = 512 , lowerCamelCase__ = 50 , lowerCamelCase__ = 7.5 , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "pil" , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = 1 , **lowerCamelCase__ , ) -> Any:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
@torch.no_grad()
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = 512 , lowerCamelCase__ = 512 , lowerCamelCase__ = 50 , lowerCamelCase__ = 7.5 , lowerCamelCase__ = None , lowerCamelCase__ = 1 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "pil" , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = 1 , **lowerCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
lowercase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(lowerCamelCase__ )
# Check that the height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
lowercase__ = self.textaimg_sda_a(
prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
lowercase__ = self.textaimg_sda_a(
prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
lowercase__ = self.textaimg_sda_a(
prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
lowercase__ = self.textaimg_sda_a(
prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
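# Hypothetical usage sketch (loading path assumed, not from this file): classes
# shaped like this one are distributed as diffusers community pipelines, so a
# plausible entry point is
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison")
#   pipe.enable_attention_slicing()
#   images = pipe(prompt="an astronaut riding a horse", num_inference_steps=50).images
#   # one image per checkpoint: v1.1, v1.2, v1.3, v1.4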
| 325
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(_a )
class lowerCAmelCase ( _a ):
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def a__ ( self , lowerCAmelCase__=None ):
_A= {}
if top_k is not None:
_A= top_k
return {}, {}, postprocess_params
def __call__( self , lowerCAmelCase__ , **lowerCAmelCase__ ):
return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
def a__ ( self , lowerCAmelCase__ ):
_A= load_image(lowerCAmelCase__ )
_A= self.image_processor(images=lowerCAmelCase__ , return_tensors=self.framework )
return model_inputs
def a__ ( self , lowerCAmelCase__ ):
_A= self.model(**lowerCAmelCase__ )
return model_outputs
def a__ ( self , lowerCAmelCase__ , lowerCAmelCase__=5 ):
if top_k > self.model.config.num_labels:
_A= self.model.config.num_labels
if self.framework == "pt":
_A= model_outputs.logits.softmax(-1 )[0]
_A, _A= probs.topk(lowerCAmelCase__ )
elif self.framework == "tf":
_A= stable_softmax(model_outputs.logits , axis=-1 )[0]
_A= tf.math.top_k(lowerCAmelCase__ , k=lowerCAmelCase__ )
_A, _A= topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
_A= scores.tolist()
_A= ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
| 702
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCamelCase ( lowerCAmelCase_="" ) -> str:
'''simple docstring'''
_A= tempfile.mkdtemp()
return os.path.join(lowerCAmelCase_ , str(uuid.uuida() ) + suffix )
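# Example (illustrative): returns a unique path such as
# /tmp/tmpab12cd34/9f1c...-....wav inside a fresh temporary directory.
#
#   wav_path = get_new_path(suffix=".wav")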
@require_soundfile
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
def a__ ( self ):
_A= torch.rand(12 , dtype=torch.floataa ) - 0.5
_A= AgentAudio(lowerCAmelCase__ )
_A= str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase__ , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCAmelCase__ ) )
# Ensure that the file contains the same value as the original tensor
_A, _A= sf.read(lowerCAmelCase__ )
self.assertTrue(torch.allclose(lowerCAmelCase__ , torch.tensor(lowerCAmelCase__ ) , atol=1E-4 ) )
def a__ ( self ):
_A= torch.rand(12 , dtype=torch.floataa ) - 0.5
_A= get_new_path(suffix='.wav' )
sf.write(lowerCAmelCase__ , lowerCAmelCase__ , 16000 )
_A= AgentAudio(lowerCAmelCase__ )
self.assertTrue(torch.allclose(lowerCAmelCase__ , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , lowerCAmelCase__ )
@require_vision
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
def a__ ( self ):
_A= torch.randint(0 , 256 , (64, 64, 3) )
_A= AgentImage(lowerCAmelCase__ )
_A= str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase__ , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase__ ) )
def a__ ( self ):
_A= Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
_A= Image.open(lowerCAmelCase__ )
_A= AgentImage(lowerCAmelCase__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase__ ) )
def a__ ( self ):
_A= Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
_A= Image.open(lowerCAmelCase__ )
_A= AgentImage(lowerCAmelCase__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase__ ) )
class lowerCAmelCase ( unittest.TestCase ):
def a__ ( self ):
_A= 'Hey!'
_A= AgentText(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , agent_type.to_string() )
self.assertEqual(lowerCAmelCase__ , agent_type.to_raw() )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
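# Shared contract exercised above: every agent type exposes `to_raw()` (the
# in-memory object - tensor, PIL.Image or str) and `to_string()` (a loggable
# form; for audio and images, a file path that remains valid after the object
# itself is deleted).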
| 476
| 0
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : List[Any] = """ClapFeatureExtractor"""
_snake_case : int = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self :Optional[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[Any] ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self :List[Any] , lowerCamelCase__ :List[Any]=None , lowerCamelCase__ :Optional[int]=None , lowerCamelCase__ :Dict=None , **lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :List[str] = kwargs.pop("""sampling_rate""" , lowerCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
UpperCamelCase__ :Optional[Any] = self.tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if audios is not None:
UpperCamelCase__ :str = self.feature_extractor(
lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None and audios is not None:
UpperCamelCase__ :Tuple = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase__ ) , tensor_type=lowerCamelCase__ )
def __a ( self :Any , *lowerCamelCase__ :List[str] , **lowerCamelCase__ :Any ):
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def __a ( self :int , *lowerCamelCase__ :Any , **lowerCamelCase__ :Optional[int] ):
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def __a ( self :Tuple ):
UpperCamelCase__ :List[str] = self.tokenizer.model_input_names
UpperCamelCase__ :Tuple = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
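# Hedged usage sketch (checkpoint name assumed): `text` is routed through the
# tokenizer and `audios` through the feature extractor; when both are given,
# the audio features are merged into the tokenizer's encoding, as in __call__.
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform,
#                      sampling_rate=48_000, return_tensors="pt")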
| 45
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :Any = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = generator.manual_seed(0 )
UpperCamelCase__ :str = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = """cyberpunk 2077"""
UpperCamelCase__ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :str = torch.manual_seed(0 )
UpperCamelCase__ :Dict = pipe.dual_guided(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """
UpperCamelCase__ :List[str] = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.text_to_image(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :Optional[int] = pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images
UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
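# The three entry points exercised above, in outline (arguments as in the tests):
#
#   pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
#   pipe.dual_guided(prompt=..., image=..., text_to_image_strength=0.75, ...)  # text + image
#   pipe.text_to_image(prompt=..., ...)                                        # text only
#   pipe.image_variation(image, ...)                                           # image only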
| 45
| 1
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
SCREAMING_SNAKE_CASE__ = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
SCREAMING_SNAKE_CASE__ = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
SCREAMING_SNAKE_CASE__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
SCREAMING_SNAKE_CASE__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
SCREAMING_SNAKE_CASE__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class snake_case (UpperCamelCase ):
lowerCAmelCase__ :List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase__ :Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ :List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ :Optional[int] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class snake_case (UpperCamelCase ):
lowerCAmelCase__ :Tuple = VOCAB_FILES_NAMES
lowerCAmelCase__ :Dict = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ :Dict = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ :Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
SCREAMING_SNAKE_CASE__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
SCREAMING_SNAKE_CASE__ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(UpperCamelCase )
class snake_case :
def __call__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
elif titles is None or texts is None:
lowercase__ = titles if texts is None else texts
return super().__call__(
UpperCAmelCase_ ,UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
lowercase__ = titles if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else [titles]
lowercase__ = texts if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else [texts]
lowercase__ = len(UpperCAmelCase_ )
lowercase__ = questions if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else [questions] * n_passages
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
F'''There should be as many titles as texts, but got {len(UpperCAmelCase_ )} titles and {len(UpperCAmelCase_ )} texts.''' )
lowercase__ = super().__call__(UpperCAmelCase_ ,UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ )["input_ids"]
lowercase__ = super().__call__(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ )["input_ids"]
lowercase__ = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCAmelCase_ ,UpperCAmelCase_ )
]
}
if return_attention_mask is not False:
lowercase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase__ = attention_mask
return self.pad(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = 16 ,UpperCAmelCase_ = 64 ,UpperCAmelCase_ = 4 ,) -> List[DPRSpanPrediction]:
'''simple docstring'''
lowercase__ = reader_input["input_ids"]
lowercase__ , lowercase__ , lowercase__ = reader_output[:3]
lowercase__ = len(UpperCAmelCase_ )
lowercase__ = sorted(range(UpperCAmelCase_ ) ,reverse=UpperCAmelCase_ ,key=relevance_logits.__getitem__ )
lowercase__ = []
for doc_id in sorted_docs:
lowercase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase__ = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase__ = sequence_ids.index(self.pad_token_id )
else:
lowercase__ = len(UpperCAmelCase_ )
lowercase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=UpperCAmelCase_ ,top_spans=UpperCAmelCase_ ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=UpperCAmelCase_ ,start_index=UpperCAmelCase_ ,end_index=UpperCAmelCase_ ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(UpperCAmelCase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,) -> List[DPRSpanPrediction]:
'''simple docstring'''
lowercase__ = []
for start_index, start_score in enumerate(UpperCAmelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase__ = sorted(UpperCAmelCase_ ,key=lambda UpperCAmelCase_ : x[1] ,reverse=UpperCAmelCase_ )
lowercase__ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
lowercase__ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCAmelCase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCamelCase )
class snake_case (UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ :int = VOCAB_FILES_NAMES
lowerCAmelCase__ :Tuple = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ :Optional[Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ :Union[str, Any] = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ :Tuple = ["input_ids", "attention_mask"]
| 704
|
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]):
    '''simple docstring'''
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider the rest of the activities
    for j in range(n):
        # If this activity starts at or after the finish time of the
        # previously selected activity, select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = [1, 3, 0, 5, 8, 5]
SCREAMING_SNAKE_CASE__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
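# Variant sketch of the same greedy rule that returns the selected indices
# instead of printing them (assumes `finish` is sorted in ascending order,
# as in the example data above):
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always selected
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected

# max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) -> [0, 1, 3, 4]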
| 539
| 0
|
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = nn.Linear(3 , 4 )
__SCREAMING_SNAKE_CASE = nn.BatchNormad(4 )
__SCREAMING_SNAKE_CASE = nn.Linear(4 , 5 )
def _A ( self , _A ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(a_ ) ) )
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def _A ( self , _A , *_A , **_A ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def _A ( self , _A , _A ):
'''simple docstring'''
return output + 1
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ModelForTest()
__SCREAMING_SNAKE_CASE = ModelHook()
add_hook_to_module(a_ , a_ )
self.assertEqual(test_model._hf_hook , a_ )
self.assertTrue(hasattr(a_ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(a_ )
self.assertFalse(hasattr(a_ , '_hf_hook' ) )
self.assertFalse(hasattr(a_ , '_old_forward' ) )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ModelForTest()
__SCREAMING_SNAKE_CASE = ModelHook()
add_hook_to_module(a_ , a_ )
add_hook_to_module(a_ , a_ , append=a_ )
self.assertEqual(isinstance(test_model._hf_hook , a_ ) , a_ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(a_ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(a_ )
self.assertFalse(hasattr(a_ , '_hf_hook' ) )
self.assertFalse(hasattr(a_ , '_old_forward' ) )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ModelForTest()
__SCREAMING_SNAKE_CASE = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE = test_model(x + 1 )
__SCREAMING_SNAKE_CASE = test_model(x + 2 )
__SCREAMING_SNAKE_CASE = PreForwardHook()
add_hook_to_module(a_ , a_ )
__SCREAMING_SNAKE_CASE = test_model(a_ )
self.assertTrue(torch.allclose(a_ , a_ , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__SCREAMING_SNAKE_CASE = PreForwardHook()
add_hook_to_module(a_ , a_ )
__SCREAMING_SNAKE_CASE = test_model(a_ )
self.assertTrue(torch.allclose(a_ , a_ , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__SCREAMING_SNAKE_CASE = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(a_ , a_ )
__SCREAMING_SNAKE_CASE = test_model(a_ )
assert torch.allclose(a_ , a_ , atol=1e-5 )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ModelForTest()
__SCREAMING_SNAKE_CASE = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE = test_model(a_ )
__SCREAMING_SNAKE_CASE = PostForwardHook()
add_hook_to_module(a_ , a_ )
__SCREAMING_SNAKE_CASE = test_model(a_ )
self.assertTrue(torch.allclose(a_ , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__SCREAMING_SNAKE_CASE = PostForwardHook()
add_hook_to_module(a_ , a_ )
__SCREAMING_SNAKE_CASE = test_model(a_ )
self.assertTrue(torch.allclose(a_ , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__SCREAMING_SNAKE_CASE = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(a_ , a_ )
__SCREAMING_SNAKE_CASE = test_model(a_ )
assert torch.allclose(a_ , output + 2 , atol=1e-5 )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ModelForTest()
__SCREAMING_SNAKE_CASE = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE = test_model(a_ )
__SCREAMING_SNAKE_CASE = PostForwardHook()
add_hook_to_module(a_ , a_ )
__SCREAMING_SNAKE_CASE = test_model(a_ )
self.assertTrue(torch.allclose(a_ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = test_model(a_ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__SCREAMING_SNAKE_CASE = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE = model(a_ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(a_ , AlignDevicesHook(io_same_device=a_ ) )
__SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ).to(0 )
__SCREAMING_SNAKE_CASE = model(a_ )
self.assertEqual(output.device , torch.device(0 ) )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__SCREAMING_SNAKE_CASE = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**a_ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**a_ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**a_ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__SCREAMING_SNAKE_CASE = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , a_ )
__SCREAMING_SNAKE_CASE = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE = model(a_ )
self.assertEqual(output.device , a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
__SCREAMING_SNAKE_CASE = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**a_ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**a_ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**a_ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__SCREAMING_SNAKE_CASE = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE = model(a_ )
self.assertEqual(output.device , a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__SCREAMING_SNAKE_CASE = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(a_ , execution_device=a_ , offload=a_ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__SCREAMING_SNAKE_CASE = torch.device(a_ )
self.assertEqual(model.batchnorm.running_mean.device , a_ )
__SCREAMING_SNAKE_CASE = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE = model(a_ )
self.assertEqual(output.device , a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(a_ , execution_device=a_ , offload=a_ , offload_buffers=a_ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__SCREAMING_SNAKE_CASE = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE = model(a_ )
self.assertEqual(output.device , a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__SCREAMING_SNAKE_CASE = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
a_ , execution_device=a_ , offload=a_ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__SCREAMING_SNAKE_CASE = torch.device(a_ )
self.assertEqual(model.batchnorm.running_mean.device , a_ )
__SCREAMING_SNAKE_CASE = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE = model(a_ )
self.assertEqual(output.device , a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
a_ , execution_device=a_ , offload=a_ , weights_map=model.state_dict() , offload_buffers=a_ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__SCREAMING_SNAKE_CASE = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE = model(a_ )
self.assertEqual(output.device , a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
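# Sketch of the hook mechanism exercised above (mirrors the test classes, not a
# new API): a ModelHook can rewrite the forward inputs (pre_forward) and the
# output (post_forward); SequentialHook chains several of them.
#
#   model = ModelForTest()
#   add_hook_to_module(model, SequentialHook(PreForwardHook(), PostForwardHook()))
#   y = model(torch.randn(2, 3))    # forward sees x + 1, caller sees output + 1
#   remove_hook_from_module(model)  # restores the original forward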
| 148
|
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( snake_case ):
UpperCAmelCase : str = (CMStochasticIterativeScheduler,)
UpperCAmelCase : int = 10
def UpperCamelCase ( self : Dict , **a_ : List[str] ) -> Any:
snake_case: Any ={
'num_train_timesteps': 2_0_1,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
config.update(**a_ )
return config
def UpperCamelCase ( self : List[Any] ) -> List[Any]:
snake_case: Any =1_0
snake_case: List[str] =self.get_scheduler_config()
snake_case: List[Any] =self.scheduler_classes[0](**a_ )
scheduler.set_timesteps(a_ )
snake_case: Dict =scheduler.timesteps[0]
snake_case: Union[str, Any] =scheduler.timesteps[1]
snake_case: List[str] =self.dummy_sample
snake_case: List[str] =0.1 * sample
snake_case: int =scheduler.step(a_ , a_ , a_ ).prev_sample
snake_case: Optional[Any] =scheduler.step(a_ , a_ , a_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase ( self : int ) -> int:
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=a_ )
def UpperCamelCase ( self : Optional[Any] ) -> Dict:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=a_ )
def UpperCamelCase ( self : Tuple ) -> List[str]:
snake_case: List[Any] =self.scheduler_classes[0]
snake_case: List[Any] =self.get_scheduler_config()
snake_case: Any =scheduler_class(**a_ )
snake_case: Dict =1
scheduler.set_timesteps(a_ )
snake_case: List[Any] =scheduler.timesteps
snake_case: Optional[Any] =torch.manual_seed(0 )
snake_case: Optional[Any] =self.dummy_model()
snake_case: List[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(a_ ):
# 1. scale model input
snake_case: Any =scheduler.scale_model_input(a_ , a_ )
# 2. predict noise residual
snake_case: List[str] =model(a_ , a_ )
# 3. predict previous sample x_t-1
snake_case: Dict =scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample
snake_case: List[Any] =pred_prev_sample
snake_case: Optional[Any] =torch.sum(torch.abs(a_ ) )
snake_case: Optional[Any] =torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1E-2
assert abs(result_mean.item() - 0.2_5_1_0 ) < 1E-3
def UpperCamelCase ( self : Dict ) -> Union[str, Any]:
snake_case: Dict =self.scheduler_classes[0]
snake_case: Tuple =self.get_scheduler_config()
snake_case: str =scheduler_class(**a_ )
snake_case: List[Any] =[1_0_6, 0]
scheduler.set_timesteps(timesteps=a_ )
snake_case: Optional[Any] =scheduler.timesteps
snake_case: Dict =torch.manual_seed(0 )
snake_case: Optional[int] =self.dummy_model()
snake_case: Any =self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
snake_case: List[Any] =scheduler.scale_model_input(a_ , a_ )
# 2. predict noise residual
snake_case: Any =model(a_ , a_ )
# 3. predict previous sample x_t-1
snake_case: List[str] =scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample
snake_case: Optional[Any] =pred_prev_sample
snake_case: Union[str, Any] =torch.sum(torch.abs(a_ ) )
snake_case: Tuple =torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1E-2
assert abs(result_mean.item() - 0.4_5_2_7 ) < 1E-3
def UpperCamelCase ( self : int ) -> Tuple:
snake_case: List[Any] =self.scheduler_classes[0]
snake_case: Union[str, Any] =self.get_scheduler_config()
snake_case: str =scheduler_class(**a_ )
snake_case: str =[3_9, 3_0, 1_2, 1_5, 0]
with self.assertRaises(a_ , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=a_ )
def UpperCamelCase ( self : Dict ) -> Optional[int]:
snake_case: Optional[Any] =self.scheduler_classes[0]
snake_case: Dict =self.get_scheduler_config()
snake_case: str =scheduler_class(**a_ )
snake_case: Any =[3_9, 3_0, 1_2, 1, 0]
snake_case: List[Any] =len(a_ )
with self.assertRaises(a_ , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=a_ , timesteps=a_ )
def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
snake_case: Any =self.scheduler_classes[0]
snake_case: int =self.get_scheduler_config()
snake_case: Optional[Any] =scheduler_class(**a_ )
snake_case: List[Any] =[scheduler.config.num_train_timesteps]
with self.assertRaises(
a_ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=a_ )
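# The denoising loop shape the tests above exercise, in outline:
#
#   scheduler.set_timesteps(num_inference_steps)            # or timesteps=[...]
#   sample = initial_sample * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample, generator=g).prev_sample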
| 350
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ : int = logging.get_logger(__name__)
a_ : List[str] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class _snake_case ( A__ , A__ ):
_lowercase : Optional[Any] = '''bit'''
_lowercase : Tuple = ['''preactivation''', '''bottleneck''']
_lowercase : List[Any] = ['''SAME''', '''VALID''']
def __init__( self , a=3 , a=64 , a=[256, 512, 1024, 2048] , a=[3, 4, 6, 3] , a="preactivation" , a="relu" , a=None , a=32 , a=0.0 , a=False , a=32 , a=1 , a=None , a=None , **a , ) -> Any:
super().__init__(**a)
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types)}''')
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
SCREAMING_SNAKE_CASE = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''')
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = embedding_size
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = layer_type
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = global_padding
SCREAMING_SNAKE_CASE = num_groups
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = embedding_dynamic_padding
SCREAMING_SNAKE_CASE = output_stride
SCREAMING_SNAKE_CASE = width_factor
SCREAMING_SNAKE_CASE = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(a) + 1)]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=a , out_indices=a , stage_names=self.stage_names)
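# Illustrative instantiation sketch (argument values assumed): anything outside
# `layer_types` / `supported_padding` raises a ValueError, and `global_padding`
# is stored upper-cased by __init__.
#
#   config = BitConfig(layer_type="bottleneck", global_padding="same",
#                      out_features=["stem", "stage4"])
#   config.global_padding  # "SAME"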
| 444
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _snake_case :
def __init__( self , a , a=99 , a=13 , a=7 , a=9 , a=True , a=True , a=False , a=32 , a=5 , a=4 , a=37 , a=8 , a=0.1 , a=0.0_02 , a=1 , a=0 , a=0 , a=None , a=None , ) -> List[str]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = encoder_seq_length
SCREAMING_SNAKE_CASE = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE = self.decoder_seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_attention_mask
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = d_ff
SCREAMING_SNAKE_CASE = relative_attention_num_buckets
SCREAMING_SNAKE_CASE = dropout_rate
SCREAMING_SNAKE_CASE = initializer_factor
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = decoder_start_token_id
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = decoder_layers
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
return TaConfig.from_pretrained('google/umt5-base')
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a=None , a=None , a=None , a=None , a=None , ) -> Optional[int]:
if attention_mask is None:
SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=a)
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=a)
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=a)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1)
SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1)
SCREAMING_SNAKE_CASE = self.get_config()
SCREAMING_SNAKE_CASE = config.num_attention_heads
SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(a , a , a)
return config, input_dict
    def prepare_config_and_inputs_for_common( self ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past) , config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]) , 4)
    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids , use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size)
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens , past_key_values=past_key_values)['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3))
    def create_and_check_model_fpaa_forward( self , config , input_dict , ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_lowercase : Any = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_lowercase : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_lowercase : Tuple = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_lowercase : int = True
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : int = True
_lowercase : Any = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_lowercase : int = [0.8, 0.9]
    def setUp( self ):
        self.model_tester = UMTaModelTester(self)
@unittest.skip('Test has a segmentation fault on torch 1.8.0')
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=True , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            'head_mask': torch.zeros(config.num_layers , config.num_heads , device=torch_device),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device)
            out = model.generate(
                config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0)
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.')
def SCREAMING_SNAKE_CASE__ ( self) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=a).to(a)
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=a , legacy=a)
SCREAMING_SNAKE_CASE = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
SCREAMING_SNAKE_CASE = tokenizer(a , return_tensors='pt' , padding=a).input_ids
# fmt: off
SCREAMING_SNAKE_CASE = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
])
# fmt: on
torch.testing.assert_allclose(a , a)
SCREAMING_SNAKE_CASE = model.generate(input_ids.to(a))
SCREAMING_SNAKE_CASE = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(a)
self.assertEqual(a , a)
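# Illustrative aside (not part of the test suite): `prepare_inputs_dict` above derives
# attention masks from pad positions when none are given. A minimal sketch, assuming a
# pad_token_id of 0:
if __name__ == "__main__" and is_torch_available():
    _demo_ids = torch.tensor([[5, 7, 9, 0, 0]])
    _demo_mask = _demo_ids.ne(0)  # True exactly where a real (non-pad) token sits
    assert _demo_mask.tolist() == [[True, True, True, False, False]]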
| 444
| 1
|
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a ( __lowerCAmelCase ):
"""simple docstring"""
def lowercase_ ( self ):
'''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """embed_dim""" ) )
        self.parent.assertTrue(hasattr(config , """num_heads""" ) )
class CvtModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1e-12 , is_training=True , use_labels=True , num_labels=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = CvtModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
__lowerCAmelCase = (
{"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = CvtModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return
@unittest.skip(reason="""Cvt does not output attentions""" )
def lowercase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def lowercase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase, __UpperCAmelCase: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase: Any = model_class(snake_case_ )
__UpperCAmelCase: Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase: Optional[int] = [*signature.parameters.keys()]
__UpperCAmelCase: Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
__UpperCAmelCase: Tuple = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__UpperCAmelCase: Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__UpperCAmelCase: int = outputs.hidden_states
__UpperCAmelCase: List[Any] = len(self.model_tester.depth )
self.assertEqual(len(snake_case_ ) , snake_case_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__UpperCAmelCase, __UpperCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase: Tuple = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase: Tuple = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase_ ( self ):
'''simple docstring'''
pass
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase: str = CvtModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def UpperCamelCase__ ( ) -> Optional[int]:
__UpperCAmelCase: Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Any = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case_ )
__UpperCAmelCase: Any = self.default_image_processor
__UpperCAmelCase: int = prepare_img()
__UpperCAmelCase: int = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
__UpperCAmelCase: Union[str, Any] = model(**snake_case_ )
# verify the logits
__UpperCAmelCase: Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
__UpperCAmelCase: Any = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
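# Illustrative aside: the shape check in `create_and_check_model` relies on the
# conv-embedding size formula floor((dim + 2 * pad - kernel) / stride) + 1 applied per
# stage. With the tester defaults (image 64, kernels [7, 3, 3], strides [4, 2, 2],
# paddings [2, 1, 1]) the spatial size shrinks 64 -> 16 -> 8 -> 4:
if __name__ == "__main__":
    _size = 64
    for _k, _s, _p in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
        _size = floor((_size + 2 * _p - _k) / _s) + 1
    assert _size == 4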
| 523
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
SCREAMING_SNAKE_CASE_ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
SCREAMING_SNAKE_CASE_ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
SCREAMING_SNAKE_CASE_ = BeautifulSoup(res.text, 'html.parser')
SCREAMING_SNAKE_CASE_ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F"""https://google.com{link.get("href")}""")
| 523
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig( PretrainedConfig ):
"""simple docstring"""
    model_type = '''openai-gpt'''
    attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=4_0_4_7_8 , n_positions=5_1_2 , n_embd=7_6_8 , n_layer=1_2 , n_head=1_2 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
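# Illustrative aside: `attribute_map` lets generic names like `hidden_size` alias the
# GPT-specific `n_embd`. A minimal standalone sketch of that aliasing idea (this is
# the concept only, not the transformers implementation):
class _AliasedConfigSketch:
    attribute_map = {"hidden_size": "n_embd"}
    def __init__(self, n_embd=768):
        self.n_embd = n_embd
    def __getattr__(self, name):  # only called when normal lookup fails
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)
assert _AliasedConfigSketch().hidden_size == 768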
| 503
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__UpperCAmelCase = logging.get_logger(__name__)
class A__ ( A ):
"""simple docstring"""
_lowercase : List[Any] = ['''pixel_values''']
def __init__( self : Tuple , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BILINEAR , A_ : bool = True , A_ : Union[int, float] = 1 / 2_5_5 , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , **A_ : List[Any] , ):
'''simple docstring'''
super().__init__(**A_ )
_lowerCAmelCase : Optional[int] = size if size is not None else {"shortest_edge": 2_2_4}
_lowerCAmelCase : Optional[int] = get_size_dict(A_ , default_to_square=A_ )
_lowerCAmelCase : Any = crop_size if crop_size is not None else {"height": 2_5_6, "width": 2_5_6}
_lowerCAmelCase : str = get_size_dict(A_ , param_name="crop_size" )
_lowerCAmelCase : Any = do_resize
_lowerCAmelCase : Optional[Any] = size
_lowerCAmelCase : str = resample
_lowerCAmelCase : Optional[Any] = do_rescale
_lowerCAmelCase : Dict = rescale_factor
_lowerCAmelCase : Any = do_center_crop
_lowerCAmelCase : List[Any] = crop_size
_lowerCAmelCase : List[Any] = do_flip_channel_order
def __magic_name__ ( self : Tuple , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PIL.Image.BILINEAR , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Dict , ):
'''simple docstring'''
_lowerCAmelCase : Any = get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
_lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(A_ , size=size["shortest_edge"] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def __magic_name__ ( self : Union[str, Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : List[str] , ):
'''simple docstring'''
_lowerCAmelCase : str = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(A_ , size=(size["height"], size["width"]) , data_format=A_ , **A_ )
def __magic_name__ ( self : Tuple , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ):
'''simple docstring'''
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def __magic_name__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
return flip_channel_order(A_ , data_format=A_ )
def __magic_name__ ( self : List[Any] , A_ : ImageInput , A_ : bool = None , A_ : Dict[str, int] = None , A_ : PILImageResampling = None , A_ : bool = None , A_ : float = None , A_ : bool = None , A_ : Dict[str, int] = None , A_ : bool = None , A_ : Optional[Union[str, TensorType]] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : Tuple , ):
'''simple docstring'''
_lowerCAmelCase : str = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Dict = resample if resample is not None else self.resample
_lowerCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : str = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_lowerCAmelCase : str = size if size is not None else self.size
_lowerCAmelCase : str = get_size_dict(A_ , default_to_square=A_ )
_lowerCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : List[str] = get_size_dict(A_ , param_name="crop_size" )
_lowerCAmelCase : Dict = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
_lowerCAmelCase : Optional[int] = [to_numpy_array(A_ ) for image in images]
if do_resize:
_lowerCAmelCase : Any = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
_lowerCAmelCase : Tuple = [self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
_lowerCAmelCase : Any = [self.rescale(image=A_ , scale=A_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_lowerCAmelCase : Dict = [self.flip_channel_order(image=A_ ) for image in images]
_lowerCAmelCase : Optional[Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
_lowerCAmelCase : Tuple = {"pixel_values": images}
return BatchFeature(data=A_ , tensor_type=A_ )
def __magic_name__ ( self : List[Any] , A_ : Union[str, Any] , A_ : List[Tuple] = None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(A_ ):
_lowerCAmelCase : Dict = target_sizes.numpy()
_lowerCAmelCase : List[Any] = []
for idx in range(len(A_ ) ):
_lowerCAmelCase : Tuple = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=A_ )
_lowerCAmelCase : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
_lowerCAmelCase : Tuple = logits.argmax(dim=1 )
_lowerCAmelCase : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
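# Illustrative aside: `flip_channel_order` exists because these checkpoints expect BGR
# rather than RGB input. A minimal channels-last sketch of the same flip on dummy data:
if __name__ == "__main__":
    _img = np.arange(12).reshape(2, 2, 3)  # H x W x C dummy image
    _bgr = _img[..., ::-1]                 # reverse the channel axis: RGB -> BGR
    assert _bgr[0, 0].tolist() == [2, 1, 0]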
| 503
| 1
|
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__ ) + "/p022_names.txt" ) as file:
        names = str(file.readlines()[0] )
    names = names.replace("\"" , "" ).split("," )
    names.sort()
    total_score = 0
    name_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
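# Worked example from the problem statement: "COLIN" has alphabetical value
# 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938, so it scores 938 * 53 = 49714.
assert 938 * sum(ord(letter) - 64 for letter in "COLIN") == 49714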
| 473
|
"""simple docstring"""
def solution(numerator : int = 1 , digit : int = 1000 ) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
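# Worked example: 1/7 = 0.(142857) has the longest recurring cycle among d < 10.
# Tracking the remainders of the long division exposes the 6-digit cycle:
_seen: list = []
_r = 1
while _r not in _seen:
    _seen.append(_r)
    _r = _r * 10 % 7
assert len(_seen) - _seen.index(_r) == 6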
| 473
| 1
|
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = False , ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = cipher_alphabet or [chr(UpperCamelCase ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
lowerCAmelCase__ : Any = {
"""a""": 0.0_8497,
"""b""": 0.0_1492,
"""c""": 0.0_2202,
"""d""": 0.0_4253,
"""e""": 0.1_1162,
"""f""": 0.0_2228,
"""g""": 0.0_2015,
"""h""": 0.0_6094,
"""i""": 0.0_7546,
"""j""": 0.0_0153,
"""k""": 0.0_1292,
"""l""": 0.0_4025,
"""m""": 0.0_2406,
"""n""": 0.0_6749,
"""o""": 0.0_7507,
"""p""": 0.0_1929,
"""q""": 0.0_0095,
"""r""": 0.0_7587,
"""s""": 0.0_6327,
"""t""": 0.0_9356,
"""u""": 0.0_2758,
"""v""": 0.0_0978,
"""w""": 0.0_2560,
"""x""": 0.0_0150,
"""y""": 0.0_1994,
"""z""": 0.0_0077,
}
else:
# Custom frequencies dictionary
lowerCAmelCase__ : Dict = frequencies_dict
if not case_sensitive:
lowerCAmelCase__ : Optional[Any] = ciphertext.lower()
# Chi squared statistic values
lowerCAmelCase__ : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(UpperCamelCase ) ):
lowerCAmelCase__ : Any = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
lowerCAmelCase__ : int = (alphabet_letters.index(letter.lower() ) - shift) % len(
UpperCamelCase )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
lowerCAmelCase__ : Tuple = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
lowerCAmelCase__ : List[Any] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
lowerCAmelCase__ : Any = decrypted_with_shift.lower().count(UpperCamelCase )
# Get the expected amount of times the letter should appear based
# on letter frequencies
lowerCAmelCase__ : Tuple = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCAmelCase__ : Any = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
lowerCAmelCase__ : List[Any] = decrypted_with_shift.count(UpperCamelCase )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCAmelCase__ : List[Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCAmelCase__ : int = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
lowerCAmelCase__ : str = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(UpperCamelCase ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
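# Illustrative aside: the chi-squared score above compares observed letter counts with
# counts expected from English frequencies. A minimal sketch on a tiny, assumed
# frequency subset (illustration only, not the full table used by the function):
if __name__ == "__main__":
    _freq = {"e": 0.127, "t": 0.091, "a": 0.082}
    _text = "tea"
    _chi2 = 0.0
    for _letter, _f in _freq.items():
        _expected = _f * len(_text)
        _chi2 += (_text.count(_letter) - _expected) ** 2 / _expected
    print(f"chi-squared for {_text!r}: {_chi2:.3f}")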
| 160
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
    @property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
        return model
    def test_inference( self ):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet ,scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator ,num_inference_steps=20 ,output_type="""numpy""" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pndm(generator=generator ,num_inference_steps=20 ,output_type="""numpy""" ,return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
    def test_inference_cifar10( self ):
        model_id = """google/ddpm-cifar10-32"""
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet ,scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator ,output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
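# Illustrative aside: the `[0, -3:, -3:, -1]` slices above pick the bottom-right 3x3
# patch of the last channel from a (batch, H, W, C) array; a quick shape check:
if __name__ == "__main__":
    assert np.zeros((1, 32, 32, 3))[0, -3:, -3:, -1].shape == (3, 3)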
| 160
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class Summarization( TaskTemplate ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__lowerCAmelCase : str = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__lowerCAmelCase : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
__lowerCAmelCase : ClassVar[Features] = Features({"""summary""": Value("""string""" )} )
__lowerCAmelCase : str = "text"
__lowerCAmelCase : str = "summary"
@property
def __lowerCamelCase ( self :Dict ):
return {self.text_column: "text", self.summary_column: "summary"}
| 252
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
__lowerCAmelCase : int = XGLMConfig
__lowerCAmelCase : Any = {}
__lowerCAmelCase : str = """gelu"""
    def __init__( self ,parent ,batch_size=1_4 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_labels=True ,vocab_size=9_9 ,d_model=3_2 ,num_hidden_layers=2 ,num_attention_heads=4 ,ffn_dim=3_7 ,activation_function="gelu" ,activation_dropout=0.1 ,attention_dropout=0.1 ,max_position_embeddings=5_1_2 ,initializer_range=0.02 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
def __lowerCamelCase ( self :Optional[int] ):
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
    def prepare_config_and_inputs( self ):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) ,clip_value_min=0 ,clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
    def get_config( self ):
return XGLMConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=__lowercase ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=__lowercase ,)
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__lowerCAmelCase : List[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__lowerCAmelCase : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__lowerCAmelCase : int = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
__lowerCAmelCase : str = False
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : Optional[int] = False
    def setUp( self ):
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=XGLMConfig ,n_embd=3_7 )
def __lowerCamelCase ( self :Union[str, Any] ):
self.config_tester.run_common_tests()
@slow
def __lowerCamelCase ( self :Optional[Any] ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Any = TFXGLMModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def __lowerCamelCase ( self :Tuple ):
super().test_resize_token_embeddings()
@require_tf
class a ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self :List[str] ,__lowercase :List[Any]=True ):
snake_case__ : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
snake_case__ : int = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] ,dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
snake_case__ : str = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
snake_case__ : Tuple = model.generate(__lowercase ,do_sample=__lowercase ,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() ,__lowercase )
@slow
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Any = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
snake_case__ : Tuple = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
snake_case__ : int = tokenizer('''Today is a nice day and''' ,return_tensors='''tf''' )
snake_case__ : Optional[Any] = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
snake_case__ : Optional[Any] = model.generate(__lowercase ,do_sample=__lowercase ,seed=[7, 0] )
snake_case__ : List[str] = tokenizer.decode(output_ids[0] ,skip_special_tokens=__lowercase )
snake_case__ : int = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(__lowercase ,__lowercase )
@slow
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
snake_case__ : List[str] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
snake_case__ : List[str] = '''left'''
# use different length sentences to test batching
snake_case__ : List[str] = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
snake_case__ : Optional[int] = tokenizer(__lowercase ,return_tensors='''tf''' ,padding=__lowercase )
snake_case__ : List[str] = inputs['''input_ids''']
snake_case__ : Union[str, Any] = model.generate(input_ids=__lowercase ,attention_mask=inputs['''attention_mask'''] ,max_new_tokens=1_2 )
snake_case__ : str = tokenizer(sentences[0] ,return_tensors='''tf''' ).input_ids
snake_case__ : int = model.generate(input_ids=__lowercase ,max_new_tokens=1_2 )
snake_case__ : Optional[int] = tokenizer(sentences[1] ,return_tensors='''tf''' ).input_ids
snake_case__ : Dict = model.generate(input_ids=__lowercase ,max_new_tokens=1_2 )
snake_case__ : List[str] = tokenizer.batch_decode(__lowercase ,skip_special_tokens=__lowercase )
snake_case__ : int = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=__lowercase )
snake_case__ : Union[str, Any] = tokenizer.decode(output_padded[0] ,skip_special_tokens=__lowercase )
snake_case__ : List[Any] = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(__lowercase ,__lowercase )
self.assertListEqual(__lowercase ,[non_padded_sentence, padded_sentence] )
| 252
| 1
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed edge; weight must be 0 or 1."""

    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list supporting 0-1 BFS shortest paths."""
    def __init__( self , size ):
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex ):
        return iter(self._graph[vertex] )
    @property
    def size( self ):
        return self._size
    def add_edge( self , from_vertex , to_vertex , weight ):
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex , finish_vertex ):
        # Deque-based 0-1 BFS: 0-weight edges go to the front, 1-weight to the back.
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
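# Usage sketch for the 0-1 BFS above (toy graph assumed for illustration):
if __name__ == "__main__":
    _g = AdjacencyList(3)
    _g.add_edge(0, 1, 0)  # free edge
    _g.add_edge(1, 2, 1)  # unit-cost edge
    assert _g.get_shortest_path(0, 2) == 1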
| 154
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification( TaskTemplate ):
"""simple docstring"""
    task: str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""image""": Image()} )
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"
    def align_with_features( self , features ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
@property
def __lowercase ( self ):
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 154
| 1
|
"""simple docstring"""
from math import sqrt
def sum_of_divisors ( n : int ) -> int:
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution ( limit : int = 1_0_0_0_0 ) -> int:
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
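# Classic sanity check: 220 and 284 form an amicable pair, so `sum_of_divisors`
# maps each onto the other and both pass the filter in `solution`.
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220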
| 231
|
"""simple docstring"""
from __future__ import annotations
def solve_maze ( maze : list[list[int]] ) -> bool:
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('''\n'''.join(str(row ) for row in solutions ) )
    else:
        print('''No solution exists!''' )
    return solved
def run_maze ( maze : list[list[int]] , i : int , j : int , solutions : list[list[int]] ) -> bool:
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
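# Usage sketch (assumed 3x3 maze; 0 = open cell, 1 = blocked):
if __name__ == "__main__":
    assert solve_maze([[0, 1, 0], [0, 0, 0], [1, 0, 0]]) is True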
| 231
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : Optional[Any] = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712
|
def knapsack ( weights : list , values : list , number_of_items : int , max_weight : int , index : int ) -> int:
    """Recursive 0/1 knapsack: maximum value achievable from `index` onward."""
    if index == number_of_items:
        return 0
    ans_without_item = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    ans_with_item = 0
    if weights[index] <= max_weight:
        ans_with_item = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans_without_item , ans_with_item )
if __name__ == "__main__":
import doctest
doctest.testmod()
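# Usage sketch on an assumed toy instance: weights [1, 2, 3], values [6, 10, 12],
# capacity 5 -> the optimum of 22 takes the last two items.
assert knapsack([1, 2, 3], [6, 10, 12], 3, 5, 0) == 22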
| 620
| 0
|
from functools import lru_cache
def unique_prime_factors ( n :int ) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len ( n :int ) -> int:
    return len(unique_prime_factors(n ) )
def equality ( nums :list ) -> bool:
    return len(set(nums ) ) in (0, 1)
def run ( n :int ) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution ( n :int = 4 ) -> int:
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
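# Known small cases from the problem statement: the first pair of consecutive integers
# with two distinct prime factors each is (14, 15); for three factors it is
# (644, 645, 646).
assert solution(2) == 14
assert solution(3) == 644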
| 2
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__lowerCAmelCase = True
except (ImportError, ModuleNotFoundError):
__lowerCAmelCase = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def __lowerCamelCase ( lowerCAmelCase_ ) -> str:
    lowerCAmelCase_ = re.sub('<n>' , '' , lowerCAmelCase_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(lowerCAmelCase_ ) )
| 358
| 0
|
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t5, t1), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension2[1] and dimension2[0] == dimension1[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
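# A quick correctness check (sketch, not part of the original module): the 2x2 base
# case should agree with the textbook row-by-column product.
def naive_multiply(a: list, b: list) -> list:
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]


_m1 = [[1, 2], [3, 4]]
_m2 = [[5, 6], [7, 8]]
assert default_matrix_multiplication(_m1, _m2) == naive_multiply(_m1, _m2) == [[19, 22], [43, 50]]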
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 473
|
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        "raw_values" : Returns a full set of errors in case of multioutput input.\n\n        "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric("mse")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {\'mse\': 0.6123724356957945}\n\n    If you\'re using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric("mse", "multilist")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 473
| 1
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
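# Usage sketch (this file plays the role of a repo-level hubconf.py consumed by
# torch.hub; the model id below is only an example):
# import torch
# tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
# bert = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")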
| 343
|
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # sift down every internal node so the array satisfies the heap property
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node('''R''', -1)
b = Node('''B''', 6)
a = Node('''A''', 3)
x = Node('''X''', 1)
e = Node('''E''', 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
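# Behaviour sketch: decrease_key() sifts the updated node up in O(log n) because
# idx_of_element tracks each node's position, so after B drops to -17 it becomes
# the new root and peek() returns it.
assert my_min_heap.peek() is b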
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343
| 1
|
'''simple docstring'''
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        'Symbol'.center(8),
        'Stack'.center(print_width),
        'Postfix'.center(print_width),
        sep=' | ',
    )
    print('-' * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            (''.join(stack)).ljust(print_width),
            (''.join(post_fix)).ljust(print_width),
            sep=' | ',
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8),
            (''.join(stack)).ljust(print_width),
            (''.join(post_fix)).ljust(print_width),
            sep=' | ',
        )  # Output in tabular format

    return ''.join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix(''.join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
    print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
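# Worked example (sketch): for the input a+b*c the equation is reversed to c*b+a,
# whose postfix form is cb*a+; reversing that gives the prefix form +a*bc, so
# infix_2_prefix("a+b*c") returns "+a*bc" (and prints the conversion table).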
| 717
|
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
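# The solver is plain backtracking: take the first empty cell, try each digit that
# passes is_safe(), recurse, and reset the cell to 0 on a dead end. A small check
# (sketch) against the initial grid above: digit 1 is legal at row 0, column 1,
# while digit 7 is not (a 7 already sits in that 3x3 box).
assert is_safe(initial_grid, 0, 1, 1) and not is_safe(initial_grid, 0, 1, 7)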
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 126
| 0
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
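# Why the transposes above (sketch): Flax stores dense kernels as (in, out) and 2-D
# conv kernels as (H, W, in, out), while PyTorch expects (out, in) and
# (out, in, H, W), hence the .T for dense kernels and the (3, 2, 0, 1) permutation
# for 4-D ones.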
| 243
|
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url, json={'''text''': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            '''Request to slack returned an error '''
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(msg)
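# Usage sketch (the webhook URL below is illustrative; a real one is issued when
# you create an incoming webhook in Slack):
# send_slack_message("Deploy finished", "https://hooks.slack.com/services/T000/B000/XXXX")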
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22
| 0
|
def print_max_activities(start: list, finish: list) -> None:
    n = len(finish)
    print("""The following activities are selected:""")
    # The first activity is always selected
    i = 0
    print(i, end=""",""")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=""",""")
            i = j
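# Greedy trace (sketch) for the sample data below: activities are assumed sorted by
# finish time, so the selection is 0 (finishes at 2), then 1 (starts at 3 >= 2),
# then 3 (starts at 5 >= 4), then 4 (starts at 8 >= 7) -> printed as "0,1,3,4,".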
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Optional[int] = [1, 3, 0, 5, 8, 5]
A_ : Any = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 700
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 64
| 0
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 100_0000)
        if number == digits_fifth_powers_sum(number)
    )
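# Quick check (sketch): 4150 = 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0,
# so it is a fixed point of digits_fifth_powers_sum and contributes to the total.
assert digits_fifth_powers_sum(4150) == 4150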
if __name__ == "__main__":
print(solution())
| 67
|
def find_min(arr: list) -> int:
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # sum j is reachable without arr[i - 1] (fixed from dp[i][j - 1],
            # which would have marked every sum reachable)
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
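# Example (sketch): for [1, 6, 11, 5] the total is 23 and the best split is
# {1, 5, 6} vs {11}, i.e. sums 12 and 11, so the minimum difference is 1.
assert find_min([1, 6, 11, 5]) == 1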
| 67
| 1
|
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''


class SegmentTree:
    def __init__(self, collection, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
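# Build is O(n); each point update and range query walks one root-to-leaf path,
# so both run in O(log n). A quick min-tree check (sketch):
min_tree = SegmentTree([4, 2, 9], min)
assert min_tree.query_range(0, 2) == 2
min_tree.update(1, 10)
assert min_tree.query_range(0, 2) == 4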
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
    arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 719
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def a_ ( _A ) -> Optional[int]:
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
snake_case__ = model_type_to_module_name(_A )
snake_case__ = importlib.import_module(f'''.{module_name}''' , 'transformers.models' )
try:
return getattr(_A , _A )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_A , '__name__' , _A ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
snake_case__ = importlib.import_module('transformers' )
if hasattr(_A , _A ):
return getattr(_A , _A )
return None
def a_ ( _A , _A = None , _A = False , _A = False , _A = None , _A = None , _A = None , _A = False , **_A , ) -> Optional[Any]:
"""simple docstring"""
snake_case__ = get_file_from_repo(
_A , _A , cache_dir=_A , force_download=_A , resume_download=_A , proxies=_A , use_auth_token=_A , revision=_A , local_files_only=_A , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(_A , encoding='utf-8' ) as reader:
return json.load(_A )
class __SCREAMING_SNAKE_CASE:
def __init__( self: Optional[int] ) -> Union[str, Any]:
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(UpperCamelCase )
def lowerCAmelCase_ ( cls: int , UpperCamelCase: int , **UpperCamelCase: str ) -> Optional[Any]:
snake_case__ = kwargs.pop('config' , UpperCamelCase )
snake_case__ = kwargs.pop('trust_remote_code' , UpperCamelCase )
snake_case__ = True
snake_case__ , snake_case__ = ImageProcessingMixin.get_image_processor_dict(UpperCamelCase , **UpperCamelCase )
snake_case__ = config_dict.get('image_processor_type' , UpperCamelCase )
snake_case__ = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
snake_case__ = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
snake_case__ = config_dict.pop('feature_extractor_type' , UpperCamelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
snake_case__ = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
snake_case__ = config_dict['auto_map']['AutoFeatureExtractor']
snake_case__ = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCamelCase , UpperCamelCase ):
snake_case__ = AutoConfig.from_pretrained(UpperCamelCase , **UpperCamelCase )
# It could be in `config.image_processor_type``
snake_case__ = getattr(UpperCamelCase , 'image_processor_type' , UpperCamelCase )
if hasattr(UpperCamelCase , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
snake_case__ = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
snake_case__ = image_processor_class_from_name(UpperCamelCase )
snake_case__ = image_processor_auto_map is not None
snake_case__ = image_processor_class is not None or type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING
snake_case__ = resolve_trust_remote_code(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
if has_remote_code and trust_remote_code:
snake_case__ = get_class_from_dynamic_module(
UpperCamelCase , UpperCamelCase , **UpperCamelCase )
snake_case__ = kwargs.pop('code_revision' , UpperCamelCase )
if os.path.isdir(UpperCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING:
snake_case__ = IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase )]
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase: Optional[Any] , UpperCamelCase: int ) -> Optional[Any]:
IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase , UpperCamelCase )
| 372
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class lowercase__ :
"""simple docstring"""
__lowerCAmelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__lowerCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowerCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__lowerCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__lowerCAmelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether tp freeze the encoder."""} )
__lowerCAmelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class lowercase__ :
"""simple docstring"""
__lowerCAmelCase : str = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
__lowerCAmelCase : Optional[str] = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
__lowerCAmelCase : Optional[int] = field(
default=1024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__lowerCAmelCase : Optional[int] = field(
default=128 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__lowerCAmelCase : Optional[int] = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
__lowerCAmelCase : Optional[int] = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__lowerCAmelCase : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
__lowerCAmelCase : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
__lowerCAmelCase : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
__lowerCAmelCase : Optional[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Source language id for translation."""} )
__lowerCAmelCase : Optional[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Target language id for translation."""} )
__lowerCAmelCase : Optional[int] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """# num_beams to use for evaluation."""} )
__lowerCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"""***** {split} metrics *****""")
    for key in sorted(metrics.keys()):
        logger.info(f"""  {key} = {metrics[key]}""")
    save_json(metrics, os.path.join(output_dir, f"""{split}_results.json"""))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase : Tuple = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(SCREAMING_SNAKE_CASE , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCamelCase : Dict = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(SCREAMING_SNAKE_CASE )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCamelCase : Optional[Any] = SeqaSeqDataset
# Get datasets
UpperCamelCase : List[Any] = (
dataset_class(
SCREAMING_SNAKE_CASE , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
UpperCamelCase : Optional[int] = (
dataset_class(
SCREAMING_SNAKE_CASE , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCamelCase : Optional[Any] = (
dataset_class(
SCREAMING_SNAKE_CASE , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCamelCase : Tuple = (
build_compute_metrics_fn(data_args.task , SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None
)
UpperCamelCase : Optional[int] = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , data_args=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , )
UpperCamelCase : Optional[Any] = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
UpperCamelCase : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCamelCase : Dict = train_result.metrics
UpperCamelCase : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase : List[str] = trainer.evaluate(metric_key_prefix="""val""" )
UpperCamelCase : int = data_args.n_val
UpperCamelCase : Any = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
UpperCamelCase : List[Any] = trainer.predict(test_dataset=SCREAMING_SNAKE_CASE , metric_key_prefix="""test""" )
UpperCamelCase : Union[str, Any] = test_output.metrics
UpperCamelCase : List[str] = data_args.n_test
if trainer.is_world_process_zero():
UpperCamelCase : Dict = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE )
if training_args.predict_with_generate:
UpperCamelCase : str = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = lmap(str.strip , SCREAMING_SNAKE_CASE )
write_txt_file(SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 102
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
__A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A ,"tf_padding" ) )
self.parent.assertTrue(hasattr(A ,"depth_multiplier" ) )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] ,A : int ,A : List[Any]=13 ,A : int=3 ,A : Optional[Any]=32 ,A : Union[str, Any]=0.25 ,A : Tuple=8 ,A : Optional[int]=True ,A : Union[str, Any]=10_24 ,A : Any=32 ,A : Optional[int]="relu6" ,A : int=0.1 ,A : Optional[Any]=0.02 ,A : Optional[Any]=True ,A : List[str]=True ,A : str=10 ,A : str=None ,):
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = depth_multiplier
__A = min_depth
__A = tf_padding
__A = int(last_hidden_size * depth_multiplier )
__A = output_stride
__A = hidden_act
__A = classifier_dropout_prob
__A = use_labels
__A = is_training
__A = num_labels
__A = initializer_range
__A = scope
def UpperCamelCase_ ( self : Optional[int] ):
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.num_labels )
__A = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
__A = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self : Optional[int] ,A : str ,A : Tuple ,A : Optional[int] ,A : List[str] ):
__A = MobileNetVaModel(config=A )
model.to(A )
model.eval()
__A = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCamelCase_ ( self : List[Any] ,A : Union[str, Any] ,A : List[Any] ,A : int ,A : Union[str, Any] ):
__A = self.num_labels
__A = MobileNetVaForImageClassification(A )
model.to(A )
model.eval()
__A = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Tuple ):
__A = self.prepare_config_and_inputs()
__A , __A , __A , __A = config_and_inputs
__A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
snake_case_ = (
{"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We verify the model on a fixture image of cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
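# Note: MobileNetV1 checkpoints exported from TensorFlow keep an extra "background"
# class, which is why the classification head checked above has 1001 logits
# instead of the usual 1000 ImageNet classes.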
| 55
| 0
|
from manim import *
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =Rectangle(height=0.5 ,width=0.5 )
SCREAMING_SNAKE_CASE =Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE =Rectangle(height=0.25 ,width=0.25 )
SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE =VGroup(*snake_case ).arrange(snake_case ,buff=0 )
SCREAMING_SNAKE_CASE =VGroup(*snake_case ).arrange(snake_case ,buff=0 )
SCREAMING_SNAKE_CASE =VGroup(snake_case ,snake_case ).arrange(snake_case ,buff=0 )
SCREAMING_SNAKE_CASE =Text('CPU' ,font_size=24 )
SCREAMING_SNAKE_CASE =Group(snake_case ,snake_case ).arrange(snake_case ,buff=0.5 ,aligned_edge=snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case )
SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE =VGroup(*snake_case ).arrange(snake_case ,buff=0 )
SCREAMING_SNAKE_CASE =Text('GPU' ,font_size=24 )
SCREAMING_SNAKE_CASE =Group(snake_case ,snake_case ).arrange(snake_case ,buff=0.5 ,aligned_edge=snake_case )
gpu.move_to([-1, -1, 0] )
self.add(snake_case )
SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE =VGroup(*snake_case ).arrange(snake_case ,buff=0 )
SCREAMING_SNAKE_CASE =Text('Model' ,font_size=24 )
SCREAMING_SNAKE_CASE =Group(snake_case ,snake_case ).arrange(snake_case ,buff=0.5 ,aligned_edge=snake_case )
model.move_to([3, -1.0, 0] )
self.add(snake_case )
SCREAMING_SNAKE_CASE =[]
SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(snake_case ):
SCREAMING_SNAKE_CASE =fill.copy().set_fill(snake_case ,opacity=0.8 )
target.move_to(snake_case )
model_arr.append(snake_case )
SCREAMING_SNAKE_CASE =Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(snake_case )
self.add(*snake_case ,*snake_case )
SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE =VGroup(*snake_case ).arrange(snake_case ,buff=0 )
SCREAMING_SNAKE_CASE =VGroup(*snake_case ).arrange(snake_case ,buff=0 )
SCREAMING_SNAKE_CASE =VGroup(snake_case ,snake_case ).arrange(snake_case ,buff=0 )
SCREAMING_SNAKE_CASE =Text('Disk' ,font_size=24 )
SCREAMING_SNAKE_CASE =Group(snake_case ,snake_case ).arrange(snake_case ,buff=0.5 ,aligned_edge=snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE =MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(snake_case )
SCREAMING_SNAKE_CASE =MarkupText(
f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case ) )
SCREAMING_SNAKE_CASE =Square(0.3 )
input.set_fill(snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,snake_case ,buff=0.5 )
self.play(Write(snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=snake_case ,buff=0.02 )
self.play(MoveToTarget(snake_case ) )
self.play(FadeOut(snake_case ) )
SCREAMING_SNAKE_CASE =Arrow(start=snake_case ,end=snake_case ,color=snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE =MarkupText(
f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case ,run_time=3 ) )
SCREAMING_SNAKE_CASE ={'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(snake_case ) ,Circumscribe(model_arr[0] ,color=snake_case ,**snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=snake_case ,**snake_case ) ,Circumscribe(gpu_rect[0] ,color=snake_case ,**snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE =a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE =AnimationGroup(
FadeOut(snake_case ,run_time=0.5 ) ,MoveToTarget(snake_case ,run_time=0.5 ) ,FadeIn(snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE =0.7
self.play(
Circumscribe(model_arr[i] ,**snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=snake_case ,**snake_case ) ,Circumscribe(gpu_rect[0] ,color=snake_case ,**snake_case ) ,Circumscribe(model_arr[i + 1] ,color=snake_case ,**snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=snake_case ,**snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=snake_case ,**snake_case ) ,Circumscribe(gpu_rect[0] ,color=snake_case ,**snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE =a_c
SCREAMING_SNAKE_CASE =a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(snake_case ) ,FadeOut(snake_case ,run_time=0.5 ) ,)
SCREAMING_SNAKE_CASE =MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case ,run_time=3 ) ,MoveToTarget(snake_case ) )
self.wait()
| 252
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
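    # Both position-id tests pin down ESM's fairseq-style numbering: real tokens count
    # up from padding_idx + 1, while every padding token keeps position padding_idx itself.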
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 252
| 1
|
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """
    MLFQ (multi level feedback queue): processes cycle through several queues,
    each with its own round-robin time slice; jobs that exceed a slice are demoted,
    and the last queue runs first-come-first-served.
    """

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
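# Worked example (hand-traced through the scheduler above): with the demo processes
# below (burst times 53, 17, 68, 24, all arriving at t=0) and time slices [17, 25],
# the finish order is P2, P4, P1, P3 -- short jobs drain out of the upper round-robin
# queues while long jobs sink down to the final FCFS queue.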
if __name__ == "__main__":
import doctest
snake_case = Process('''P1''', 0, 5_3)
snake_case = Process('''P2''', 0, 1_7)
snake_case = Process('''P3''', 0, 6_8)
snake_case = Process('''P4''', 0, 2_4)
snake_case = 3
snake_case = [1_7, 2_5]
snake_case = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
snake_case = Process('''P1''', 0, 5_3)
snake_case = Process('''P2''', 0, 1_7)
snake_case = Process('''P3''', 0, 6_8)
snake_case = Process('''P4''', 0, 2_4)
snake_case = 3
snake_case = [1_7, 2_5]
snake_case = deque([Pa, Pa, Pa, Pa])
snake_case = MLFQ(number_of_queues, time_slices, queue, 0)
snake_case = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
F"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
| 103
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
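    # Design note: featurization is expensive, so the constructor above caches the
    # processed dataset on disk and guards the cache with a file lock, letting
    # concurrent (e.g. distributed) workers share a single preprocessing pass.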
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 106
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
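# Each (old, new) pair above maps an mmsegmentation parameter name onto the
# corresponding HF UperNet/Swin parameter; rename_key below applies them one by one.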
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
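# The helpers above shuffle channels with the index [0, 2, 1, 3]: the mmseg and HF
# Swin implementations disagree on the order in which the patch-merging ("unfold")
# layers concatenate the four spatially merged patches, so downsample reduction
# weights and their norms must be permuted before they can be loaded.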
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 651
|
import cva
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant, usually in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)
    def detect(self, img_path: str) -> tuple[cva.Mat, list[list[int]]]:
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # Harris sensitivity configured at construction time
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
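# The response r = det(M) - k * trace(M)^2 is large and positive only when both
# eigenvalues of the structure tensor M are large, i.e. at a corner; the fixed 0.5
# cutoff above is an arbitrary threshold and typically needs tuning per image.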
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
| 651
| 1
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
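    # Each supervised sample is a sliding window over the scaled series:
    # look_back past values as input, the next forward_days values as the target.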
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
| 288
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
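    # Each dummy past key/value above is a zero tensor of shape
    # (batch, num_attention_heads, past_key_values_length, hidden_size // num_attention_heads),
    # one (key, value) pair per layer; the attention mask is widened to cover those positions.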
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 288
| 1
|
def solution(max_perimeter: int = 10**9) -> int:
    """
    Returns the sum of the perimeters generated by the recurrence below for every
    perimeter not exceeding max_perimeter (the almost-equilateral Heronian triangles
    of Project Euler problem 94).
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
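# Sanity check (hand-traced): with max_perimeter=100 the loop visits perimeters 16
# and 50 (the triangles (5, 5, 6) and (17, 17, 16)), so solution(100) == 66.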
if __name__ == "__main__":
print(f"""{solution() = }""")
| 720
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[Any] =logging.get_logger(__name__)
__snake_case : str ={
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ ="""mvp"""
snake_case_ =["""past_key_values"""]
snake_case_ ={"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__(self ,__lowerCamelCase=5_02_67 ,__lowerCamelCase=10_24 ,__lowerCamelCase=12 ,__lowerCamelCase=40_96 ,__lowerCamelCase=16 ,__lowerCamelCase=12 ,__lowerCamelCase=40_96 ,__lowerCamelCase=16 ,__lowerCamelCase=0.0 ,__lowerCamelCase=0.0 ,__lowerCamelCase="gelu" ,__lowerCamelCase=10_24 ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.0 ,__lowerCamelCase=0.0 ,__lowerCamelCase=0.02 ,__lowerCamelCase=0.0 ,__lowerCamelCase=False ,__lowerCamelCase=True ,__lowerCamelCase=1 ,__lowerCamelCase=0 ,__lowerCamelCase=2 ,__lowerCamelCase=True ,__lowerCamelCase=2 ,__lowerCamelCase=2 ,__lowerCamelCase=False ,__lowerCamelCase=1_00 ,__lowerCamelCase=8_00 ,**__lowerCamelCase ,) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : Optional[int] = d_model
lowerCAmelCase__ : Any = encoder_ffn_dim
lowerCAmelCase__ : str = encoder_layers
lowerCAmelCase__ : Union[str, Any] = encoder_attention_heads
lowerCAmelCase__ : Dict = decoder_ffn_dim
lowerCAmelCase__ : Optional[int] = decoder_layers
lowerCAmelCase__ : int = decoder_attention_heads
lowerCAmelCase__ : str = dropout
lowerCAmelCase__ : int = attention_dropout
lowerCAmelCase__ : Optional[Any] = activation_dropout
lowerCAmelCase__ : Optional[int] = activation_function
lowerCAmelCase__ : int = init_std
lowerCAmelCase__ : List[Any] = encoder_layerdrop
lowerCAmelCase__ : Union[str, Any] = decoder_layerdrop
lowerCAmelCase__ : Tuple = classifier_dropout
lowerCAmelCase__ : Optional[int] = use_cache
lowerCAmelCase__ : List[Any] = encoder_layers
lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase__ : Any = use_prompt
lowerCAmelCase__ : Optional[Any] = prompt_length
lowerCAmelCase__ : int = prompt_mid_dim
super().__init__(
pad_token_id=__lowerCamelCase ,bos_token_id=__lowerCamelCase ,eos_token_id=__lowerCamelCase ,is_encoder_decoder=__lowerCamelCase ,decoder_start_token_id=__lowerCamelCase ,forced_eos_token_id=__lowerCamelCase ,**__lowerCamelCase ,)
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' ,__lowerCamelCase ):
lowerCAmelCase__ : Union[str, Any] = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
| 90
| 0
|
import datasets
from sklearn.metrics import matthews_corrcoef


_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results['matthews_correlation'], 2))\n        -0.25\n"

_CITATION = "\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
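# Minimal companion sketch (added for clarity, not part of the original file): the
# metric class above is a thin wrapper around sklearn's matthews_corrcoef, so the
# direct call below reproduces the first docstring example exactly.
if __name__ == "__main__":
    _refs = [1, 3, 2, 0, 3, 2]
    _preds = [1, 2, 2, 0, 3, 3]
    print(round(float(matthews_corrcoef(_refs, _preds)), 2))  # 0.54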
| 70
|
def temp_input_value(min_val: int = 10, max_val: int = 1_000, option: bool = True) -> int:
    """Temporary input values used for testing; returns min_val or max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the whole-number midpoint of two integers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for to_guess strictly inside (lower, higher), printing the trace."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Collect user input and run the guessing search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
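# Illustrative sketch (added, not in the original file): because get_avg halves the
# search interval on every pass, the loop above converges in O(log2(higher - lower))
# steps. The non-interactive counter below makes that visible.
def _count_steps(lower: int, higher: int, to_guess: int) -> int:
    steps = 0
    while True:
        steps += 1
        number = get_avg(lower, higher)
        if number < to_guess:
            lower = number
        elif number > to_guess:
            higher = number
        else:
            return steps


# _count_steps(0, 1024, 777) needs at most about log2(1024) = 10-11 iterations.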
| 648
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
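# Hedged usage sketch (added; not part of the original file). RagConfig is a composite
# config, so it is normally built from two sub-configs; the checkpoint names below are
# illustrative and kept as comments to avoid network access on import.
# from transformers import AutoConfig
# question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# generator_config = AutoConfig.from_pretrained("facebook/bart-large")
# rag_config = RagConfig.from_question_encoder_generator_configs(
#     question_encoder_config, generator_config, n_docs=5, index_name="compressed"
# )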
| 141
|
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dummy iterable dataset with a random, possibly early, stopping point.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def A ( self : Dict ) -> Tuple:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase_ : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase_ : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is very small.
lowercase_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(A , A )
lowercase_ : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [[], []]
self.check_batch_sampler_shards(A , A )
def A ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A )
def A ( self : str ) -> int:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase_ : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Tuple = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
def A ( self : Optional[Any] ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Union[str, Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
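# Quick illustrative sketch (added; not in the original tests): BatchSamplerShard splits
# an existing BatchSampler across processes by interleaving batches per process index.
if __name__ == "__main__":
    _sampler = BatchSampler(range(12), batch_size=2, drop_last=False)
    for _rank in range(2):
        _shard = BatchSamplerShard(_sampler, 2, _rank)
        print(_rank, list(_shard))  # rank 0 gets batches 0, 2, 4; rank 1 gets batches 1, 3, 5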
| 141
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
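# Minimal usage sketch (added; not in the original test file). It assumes the vision
# extras are installed and just exercises the processor on a random image.
if __name__ == "__main__":
    _processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
    _image = Image.fromarray(np.uint8(np.random.rand(40, 40, 3) * 255))
    print(_processor(_image, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 18, 18])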
| 620
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
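# Small demonstration (added for clarity; not in the original file): bytes_to_unicode
# gives every byte a printable unicode stand-in, and get_pairs lists adjacent symbol
# pairs — the two ingredients of byte-level BPE used by the tokenizer below.
if __name__ == "__main__":
    _byte_map = bytes_to_unicode()
    print(_byte_map[ord(" ")])  # 'Ġ' — the space byte maps to a visible marker character
    print(get_pairs(tuple("hello")))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}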
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
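# Hedged usage sketch (added; not part of the original file). Loading the checkpoint
# named in the maps above would exercise the full encode/decode round trip; kept as
# comments to avoid network access on import.
# tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
# ids = tokenizer("Hello world")["input_ids"]
# print(tokenizer.decode(ids))  # eos is appended by build_inputs_with_special_tokens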
| 515
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset([])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
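# Hedged usage sketch (added; not part of the test file): the integration tests above
# boil down to this call pattern, with the same checkpoint and image URLs; kept as
# comments since it downloads weights and requires a GPU.
# pipe = StableDiffusionInpaintPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
# ).to("cuda")
# result = pipe(prompt="Face of a yellow cat", image=init_image, mask_image=mask_image)
# result.images[0].save("inpainted.png")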
| 370
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 370
| 1
|
import os
import time

import numpy as np
import onnxruntime as ort


# NOTE: the original script set three onnxruntime environment flags to "1", "0" and "1"
# here; the variable names were lost in the source, so they are kept as opaque
# placeholders rather than guessed.
_ENV_FLAG_A = "1"
_ENV_FLAG_B = "0"
_ENV_FLAG_C = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
for _ in range(max_iters):
    sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )

print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 80
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
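# Hedged usage sketch (added; not in the original file): building a small X-MOD config
# and inspecting the ONNX input axes; all values here are illustrative.
if __name__ == "__main__":
    _config = XmodConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, languages=("en_XX", "de_DE"))
    print(_config.adapter_reduction_factor, _config.languages)
    print(XmodOnnxConfig(_config).inputs)  # OrderedDict of dynamic batch/sequence axes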
| 238
| 0
|
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """Build the first n terms of the harmonic series as strings: 1, 1/2, 1/3, ..."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
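# Small companion sketch (added for illustration): evaluating the series numerically
# shows its logarithmic growth — the sum of the first n terms is roughly
# ln(n) + 0.5772 (the Euler-Mascheroni constant).
def harmonic_sum(n: int) -> float:
    return sum(1 / d for d in range(1, n + 1))


# harmonic_sum(1000) ≈ 7.485, and math.log(1000) + 0.5772 ≈ 7.485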
| 709
|
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
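# Shape-flow sketch (added; assumes a standard CLIP vision config): pixel_values
# (B, 3, H, W) -> CLIPVisionModel pooler_output (B, hidden) -> mapper over a length-1
# sequence (B, 1, hidden) -> layer norm -> proj_out (B, 1, proj_size). Kept as
# comments since building a full CLIP vision config here would be illustrative only.
# encoder = PaintByExampleImageEncoder(clip_vision_config)
# latents, uncond = encoder(pixel_values, return_uncond_vector=True)
# assert latents.shape[-1] == encoder.proj_size and uncond.shape == (1, 1, encoder.proj_size)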
| 101
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
        " and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
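# Example invocations (added for illustration; `tracking_example.py` is a hypothetical
# stand-in for wherever this script is saved):
#
#     python tracking_example.py --with_tracking
#     accelerate launch tracking_example.py --mixed_precision fp16 --with_tracking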
| 2
|
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """simple docstring"""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """simple docstring"""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
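# Illustration (added): 145 is such a number, since 1! + 4! + 5! = 1 + 24 + 120 = 145.
# The bound 7 * 9! + 1 works because an 8-digit number is at most 8 * 9! = 2903040,
# which has only 7 digits, so no number with 8 or more digits can equal its own
# digit-factorial sum.
assert sum_of_digit_factorial(145) == 145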
if __name__ == "__main__":
print(F"""{solution() = }""")
| 563
| 0
|
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    '''simple docstring'''
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
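# Example (added for illustration): each recursive call re-scans one fewer element,
# stopping as soon as a full pass makes no swaps.
assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]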
if __name__ == "__main__":
import doctest
doctest.testmod()
| 386
|
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    '''simple docstring'''
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 386
| 1
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 197
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    """simple docstring"""

    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 23
| 0
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 705
|
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    # Windows of len(qs) in ks, checking at each window if qs matches
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
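# Illustrative usage (added; the parameter tree below is a hypothetical stand-in for a
# real flax GPT parameter pytree):
#
#     params = {"transformer": {"wte": {"embedding": w}, "ln_f": {"bias": b, "scale": s}}}
#     specs = set_partitions(params)
#     # -> frozen dict mapping each leaf to a PartitionSpec (or None for replicated leaves)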
| 608
| 0
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 567
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        '''simple docstring'''
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        '''simple docstring'''
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        '''simple docstring'''
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        '''simple docstring'''
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        '''simple docstring'''
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        '''simple docstring'''
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        '''simple docstring'''
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
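# Illustrative usage sketch (added; the checkpoint name comes from the map above, and
# running this requires downloading that checkpoint):
#
#     tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     ids = tok.encode_fast("Träd är fina", return_tensors="pt")
#     text = tok.decode_fast(ids.tolist())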
| 378
| 0
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    '''simple docstring'''
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    '''simple docstring'''
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 489
|
'''simple docstring'''
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    '''simple docstring'''
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
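# Examples (added for illustration):
assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"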
if __name__ == "__main__":
from doctest import testmod
testmod()
| 489
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        """simple docstring"""
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self):
        """simple docstring"""
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        """simple docstring"""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        """simple docstring"""
        return sum(1 for _ in self)

    def __repr__(self):
        """simple docstring"""
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """simple docstring"""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """simple docstring"""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """simple docstring"""
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        """simple docstring"""
        return self.delete_nth(0)

    def delete_tail(self):
        """simple docstring"""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        """simple docstring"""
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        """simple docstring"""
        return len(self) == 0
def test_circular_linked_list() -> None:
    '''simple docstring'''
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48
|
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
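# Worked illustration (added): for pattern "aabaabaaa" the failure array is
# [0, 1, 0, 1, 2, 3, 4, 5, 2]. For example, at index 4 the prefix "aabaa" has the
# longest proper border "aa", so failure[4] == 2; at index 7, "aabaabaa" has the
# border "aabaa", so failure[7] == 5.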
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 466
| 0
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 383
|
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 383
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 204
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 204
| 1
|
'''simple docstring'''
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
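# Example conversions (added for illustration):
assert energy_conversion("joule", "kilojoule", 1) == 1e-3
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0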
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340
|
'''simple docstring'''
class PrefixSum:
    """simple docstring"""

    def __init__(self, array: list[int]) -> None:
        '''simple docstring'''
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        '''simple docstring'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
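# Example usage (added for illustration):
_ps = PrefixSum([1, 2, 3, 4])
assert _ps.get_sum(0, 3) == 10
assert _ps.get_sum(1, 2) == 5
assert _ps.contains_sum(6) is True  # 1 + 2 + 3
assert _ps.contains_sum(11) is False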
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340
| 1
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 235
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        '''simple docstring'''
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)

            builder_cls = import_main_class(dataset_module.module_path, dataset=True)

            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )

            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
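# Illustrative usage sketch: streaming one of the hosted configs listed in
# DATASETS_ON_HF_GCP through the public `load_dataset` API. Requires network
# access, so it is kept behind a __main__ guard.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("wikipedia", "20220301.simple", split="train", streaming=True)
    print(next(iter(ds))["title"])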
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
def UpperCAmelCase ( self , __magic_name__ , __magic_name__ = "en_XX" , __magic_name__ = None , __magic_name__ = "ro_RO" , **__magic_name__ , ):
"""simple docstring"""
A_ : List[Any] = src_lang
A_ : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : int = self.lang_code_to_id[src_lang]
A_ : int = []
A_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Union[str, Any] = self.lang_code_to_id[lang]
A_ : Any = []
A_ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
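# Illustrative usage sketch of the tokenizer above (assumes network access to fetch
# the pretrained sentencepiece model; the checkpoint name comes from the map above).
if __name__ == "__main__":
    tokenizer = MBartTokenizer.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    enc = tokenizer("UN Chief Says There Is No Military Solution in Syria")
    # The suffix is [</s>, en_XX]: the language code is appended after eos.
    print(tokenizer.convert_ids_to_tokens(enc["input_ids"])[-2:])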
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts, but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
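# Illustrative sketch: instantiating the config above with its defaults and deriving
# the number of spectrogram patches from the stride attributes. The patch-count
# arithmetic is an assumption added for illustration, not taken from the class.
if __name__ == "__main__":
    config = ASTConfig()
    freq_patches = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1
    time_patches = (config.max_length - config.patch_size) // config.time_stride + 1
    print(freq_patches * time_patches)  # 16x16 patches over the mel spectrogram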
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if the regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
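# Illustrative sketch: applying set_partitions to a toy GPT-2-style parameter tree.
# The parameter names are made up to match the rules above; requires flax installed.
if __name__ == "__main__":
    import numpy as np

    params = {
        "transformer": {
            "wte": {"embedding": np.zeros((4, 8))},
            "h": {"0": {"attention": {"out_proj": {"kernel": np.zeros((8, 8)), "bias": np.zeros(8)}}}},
            "ln_f": {"scale": np.zeros(8), "bias": np.zeros(8)},
        }
    }
    specs = set_partitions(params)
    # embeddings are sharded over the "mp" axis, layer norms replicated (None)
    print(specs)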
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data  # value stored in this node
        self.next: Node[T] | None = None  # reference to the next node

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    """A LIFO stack implemented as a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
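# Illustrative usage of the stack above (pure Python, runnable as-is).
if __name__ == "__main__":
    stack = Stack[int]()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)         # 3->2->1
    print(stack.peek())  # 3
    print(stack.pop())   # 3
    print(len(stack))    # 2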
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizer", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
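# Illustrative sketch (pure Python): the dummy past_key_values shapes generated
# above for a toy BLOOM config; batch and sequence sizes are made-up example values.
if __name__ == "__main__":
    cfg = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
    batch, seqlen = 2, 5
    head_dim = cfg.hidden_size // cfg.n_head
    past_len = seqlen + 2  # the dummy past is deliberately longer than the input
    print("past key shape:  ", (batch * cfg.n_head, head_dim, past_len))
    print("past value shape:", (batch * cfg.n_head, past_len, head_dim))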
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
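# Illustrative usage sketch of the processor above (assumes network access, numpy,
# and Pillow; the checkpoint name is the standard public CLIP checkpoint).
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
    print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']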
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k) -> str:
    """Map a TF checkpoint key to the corresponding HF state_dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def a_ ( _A="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
"""simple docstring"""
snake_case__ = tf.train.list_variables(_A )
snake_case__ = {}
snake_case__ = ['Adafactor', 'global_step']
for name, shape in tqdm(_A , desc='converting tf checkpoint to dict' ):
snake_case__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case__ = tf.train.load_variable(_A , _A )
snake_case__ = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
__UpperCamelCase : List[Any] = parser.parse_args()
if args.save_dir is None:
__UpperCamelCase : Any = Path(args.tf_ckpt_path).parent.name
__UpperCamelCase : List[Any] = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
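# Illustrative sketch of the key-renaming pass above; the sample key is a made-up
# example in the TF checkpoint naming scheme. Defined but not called, so it does
# not interfere with the CLI entry point.
def _rename_demo():
    sample = "encoder/layer_0/self_attention/output_proj/kernel"  # hypothetical TF key
    return rename_state_dict_key(sample)  # -> "encoder.layers.0.self_attn.out_proj.weight"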
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
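# Illustrative sketch (an assumption, simplified): _LazyModule effectively defers
# submodule imports until first attribute access, similar to a module-level
# __getattr__ (PEP 562). A standalone approximation, defined but not wired in:
def _lazy_getattr_demo(name, import_structure=None, package="transformers.models.xlnet"):
    """Resolve `name` by importing the submodule that declares it, on demand."""
    import importlib

    import_structure = import_structure or _import_structure
    for module_name, exported_names in import_structure.items():
        if name in exported_names:
            module = importlib.import_module("." + module_name, package)
            return getattr(module, name)
    raise AttributeError(f"{package!r} has no attribute {name!r}")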
import argparse
import os
import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
a = """base_with_context"""
def load_notes_encoder(weights, model):
"""simple docstring"""
_lowerCAmelCase :Dict = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
_lowerCAmelCase :Union[str, Any] = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
_lowerCAmelCase :Optional[int] = weights[f"""layers_{lyr_num}"""]
_lowerCAmelCase :Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
_lowerCAmelCase :Any = ly_weight['attention']
_lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowerCAmelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_lowerCAmelCase :Optional[int] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights, model):
"""simple docstring"""
_lowerCAmelCase :int = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
_lowerCAmelCase :Union[str, Any] = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
_lowerCAmelCase :Any = weights[f"""layers_{lyr_num}"""]
_lowerCAmelCase :str = ly_weight['attention']
_lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowerCAmelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowerCAmelCase :Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_lowerCAmelCase :int = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_lowerCAmelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights, model):
"""simple docstring"""
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
_lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
_lowerCAmelCase :Dict = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
_lowerCAmelCase :List[Any] = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
_lowerCAmelCase :int = weights[f"""layers_{lyr_num}"""]
_lowerCAmelCase :Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
_lowerCAmelCase :Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
_lowerCAmelCase :Tuple = ly_weight['self_attention']
_lowerCAmelCase :Dict = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowerCAmelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowerCAmelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowerCAmelCase :List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowerCAmelCase :List[Any] = ly_weight['MultiHeadDotProductAttention_0']
_lowerCAmelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowerCAmelCase :Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_lowerCAmelCase :int = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_lowerCAmelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args):
"""simple docstring"""
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jax.tree_util.tree_map(onp.array, ta_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
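# Illustrative sketch of the conversion pattern used by the loaders above: JAX/Flax
# dense kernels are stored as (in_features, out_features), while torch.nn.Linear
# keeps (out_features, in_features), hence the ubiquitous `.T`. Defined but not
# called; toy tensors only.
def _kernel_transpose_demo():
    flax_kernel = onp.ones((4, 3), dtype="float32")  # (in_features, out_features) in the checkpoint
    linear = nn.Linear(4, 3, bias=False)
    linear.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))  # torch wants (out_features, in_features)
    return linear.weight.shape  # torch.Size([3, 4])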
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 5_12,
"""distilbert-base-uncased-distilled-squad""": 5_12,
"""distilbert-base-cased""": 5_12,
"""distilbert-base-cased-distilled-squad""": 5_12,
"""distilbert-base-german-cased""": 5_12,
"""distilbert-base-multilingual-cased""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" DistilBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the stored options disagree with the requested ones.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
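# A minimal usage sketch (hedged: it assumes the public "distilbert-base-uncased"
# checkpoint listed above is reachable; run it separately, not on module import):
#
#     from transformers import DistilBertTokenizerFast
#     tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     encoded = tokenizer("Hello world!")
#     # encoded["input_ids"] starts with the [CLS] id and ends with the [SEP] id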
| 100
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
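# Illustrative way to run only this module's fast tests (the test-file path is an
# assumption about the repository layout):
#
#     pytest tests/pipelines/deepfloyd_if/test_if_inpainting.py -q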
| 100
| 1
|
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with the given tax rate applied."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
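# Worked values for the calls above: 100 * (1 + 0.25) = 125.0 and
# 125.50 * (1 + 0.05) = 131.775.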
| 7
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    """Configuration class to store the configuration of a GPTBigCode model."""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
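# A quick construction sketch (the overrides below are illustrative, not a known
# checkpoint configuration):
#
#     config = GPTBigCodeConfig(n_layer=24, n_head=16, n_embd=2048)
#     config.multi_query  # True by default, i.e. multi-query attention is enabled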
| 7
| 1
|
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    """Recursively fill data[] with size-r combinations of arr[] and print each one."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print all combinations of size r in arr[] of size n."""
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
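# Expected output of the driver above: the ten size-3 combinations of
# [10, 20, 30, 40, 50], one per line ("10 20 30", "10 20 40", ..., "30 40 50").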
| 554
|
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the KOQV parameters of (self-)attention block i of a layer stack."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the MLP parameters of block i of a layer stack."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer norm parameter of a block."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Convert the flattened T5X parameters into a PyTorch-style state dict."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepare the model state dict."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replace the model's weights with the converted T5X weights."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Load the config and convert the T5X checkpoint to a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
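# Illustrative invocation (all paths are placeholders):
#
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/output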
| 184
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 512,
'''squeezebert/squeezebert-mnli''': 512,
'''squeezebert/squeezebert-mnli-headless''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" SqueezeBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the stored options disagree with the requested ones.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
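# Sketch of the mask built by create_token_type_ids_from_sequences (the token ids
# are made up for illustration):
#
#     tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])
#     # -> [0, 0, 0, 0, 1, 1, 1]   i.e. [CLS] a a [SEP] | b b [SEP]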
| 98
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """RegNet embeddings (stem): a single aggressive convolution."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """Projects the residual features to the correct size with a 1x1 convolution."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer: channel-wise attention from a pooled descriptor."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's X layer: a ResNet-style bottleneck block with grouped convolutions."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with Squeeze-and-Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """Handles weight initialization and provides a simple interface for pretrained models."""

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
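# A minimal inference sketch (hedged: it assumes the "facebook/regnet-y-040"
# checkpoint referenced above and a PIL `image`; run separately, not on import):
#
#     from transformers import AutoImageProcessor, RegNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     logits = model(**processor(images=image, return_tensors="pt")).logits
#     print(model.config.id2label[logits.argmax(-1).item()])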
| 98
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1_280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2_048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1_920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
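# A small construction sketch (the overrides below are illustrative, not a known
# checkpoint configuration):
#
#     config = PerceiverConfig(num_latents=128, d_latents=512)
#     config.num_self_attends_per_block  # 26 by default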
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 162
|
def longest_distance(graph):
    """Print the length of the longest path in a DAG, counted in vertices."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
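# For the adjacency list above this prints 5: the longest chain is
# 0 -> 2 -> 5 -> 6 -> 7, and long_dist counts vertices (it starts at 1).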
| 235
| 0
|
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels)

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual([mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers)
        self.parent.assertListEqual([mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers)

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual([mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers)
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual([mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers)

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 708
|
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
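# A minimal read sketch using the chained-URL form from the class comment above
# (the local path is illustrative):
#
#     import fsspec
#     with fsspec.open("gzip://file.txt::/tmp/file.txt.gz") as f:
#         data = f.read()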
| 631
| 0
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)

    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__magic_name__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
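
# Sketch of the pattern mechanism above, run against an in-memory string rather
# than a file on disk (values are illustrative):
#
#   re_pattern, replace = REPLACE_PATTERNS["init"]
#   re_pattern.sub(replace.replace("VERSION", "0.19.0"), '__version__ = "0.18.0.dev0"')
#   # -> '__version__ = "0.19.0"\n'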
| 250
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
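
# Note: at runtime the module replaces itself with a `_LazyModule`, so an
# import like `from ..conditional_detr import ConditionalDetrModel` only pulls
# in the torch-backed modeling code when the attribute is first accessed.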
| 250
| 1
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 333
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
@property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fpaa=False):
        # `fpaa` is this file's (obfuscated) name for the fp16 flag; kept so the
        # keyword call sites below keep working.
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
def __lowerCAmelCase ( self , _lowerCamelCase="CompVis/stable-diffusion-v1-4" , _lowerCamelCase=False ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[str] = '''fp16''' if fpaa else None
SCREAMING_SNAKE_CASE : Dict = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL.from_pretrained(
_lowerCamelCase , subfolder='''vae''' , torch_dtype=_lowerCamelCase , revision=_lowerCamelCase , )
model.to(_lowerCamelCase ).eval()
return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Optional[int]:
SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self.get_generator(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : Any = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE : Any = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model(fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_sd_image(_lowerCamelCase , fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = self.get_generator(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : Optional[int] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE : str = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Dict = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(_lowerCamelCase ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : Dict = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : str = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
SCREAMING_SNAKE_CASE : Any = sample[-1, -2:, :2, -2:].flatten().cpu()
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : int = self.get_sd_vae_model(fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
SCREAMING_SNAKE_CASE : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model(fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model.decode(_lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : int = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model.decode(_lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : int = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.get_generator(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model.encode(_lowerCamelCase ).latent_dist
SCREAMING_SNAKE_CASE : int = dist.sample(generator=_lowerCamelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
SCREAMING_SNAKE_CASE : Optional[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = 3e-3 if torch_device != '''mps''' else 1e-2
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=_lowerCamelCase )
| 333
| 1
|
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    """Shortest path on a binary grid (1 = walkable) with unit edge costs."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
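
# A minimal usage sketch (cells marked 1 are walkable, matching the
# `next_node == 1` relaxation above):
#
#   grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
#   distance, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   # distance == 4.0, path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]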
| 377
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
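
# A minimal usage sketch (0 = open cell, 1 = blocked, matching the
# `not maze[i][j]` check above; the solver prints the 0/1 path matrix):
#
#   maze = [[0, 1], [0, 0]]
#   solve_maze(maze)  # True; the path visits (0, 0) -> (1, 0) -> (1, 1)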
| 367
| 0
|
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    # Attribute names follow the large/huge value patterns (e.g. 1024 hidden /
    # 16 heads for "large"); the ordering below is an inference from those.
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
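
# Illustrative trace of the mapping above:
#   rename_key("encoder.blocks.0.attn.proj.weight")
#   -> "videomae.encoder.layer.0.attention.output.dense.weight"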
def _snake_case ( _snake_case : str , _snake_case : Optional[int] ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_A = orig_state_dict.pop(_snake_case )
if key.startswith('encoder.' ):
_A = key.replace('encoder.' , '' )
if "qkv" in key:
_A = key.split('.' )
if key.startswith('decoder.blocks' ):
_A = config.decoder_hidden_size
_A = int(key_split[2] )
_A = 'decoder.decoder_layers.'
if "weight" in key:
_A = val[:dim, :]
_A = val[dim : dim * 2, :]
_A = val[-dim:, :]
else:
_A = config.hidden_size
_A = int(key_split[1] )
_A = 'videomae.encoder.layer.'
if "weight" in key:
_A = val[:dim, :]
_A = val[dim : dim * 2, :]
_A = val[-dim:, :]
else:
_A = val
return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] , _snake_case : int , _snake_case : Dict ) -> Tuple:
'''simple docstring'''
_A = get_videomae_config(_snake_case )
if "finetuned" in model_name:
_A = VideoMAEForVideoClassification(_snake_case )
else:
_A = VideoMAEForPreTraining(_snake_case )
# download original checkpoint, hosted on Google Drive
_A = 'pytorch_model.bin'
gdown.cached_download(_snake_case , _snake_case , quiet=_snake_case )
_A = torch.load(_snake_case , map_location='cpu' )
if "model" in files:
_A = files['model']
else:
_A = files['module']
_A = convert_state_dict(_snake_case , _snake_case )
model.load_state_dict(_snake_case )
model.eval()
# verify model on basic input
_A = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
_A = prepare_video()
_A = image_processor(_snake_case , return_tensors='pt' )
if "finetuned" not in model_name:
_A = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
_A = torch.load(_snake_case )
_A = model(**_snake_case )
_A = outputs.logits
_A = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_A = torch.Size([1, 4_00] )
_A = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
_A = torch.Size([1, 1_74] )
_A = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
_A = torch.Size([1, 14_08, 15_36] )
_A = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
_A = torch.Size([1, 14_08, 15_36] )
_A = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_A = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
_A = torch.Size([1, 14_08, 15_36] )
_A = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_A = torch.Size([1, 4_00] )
_A = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_A = torch.Size([1, 4_00] )
_A = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_A = torch.Size([1, 4_00] )
_A = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
_A = torch.Size([1, 4_00] )
_A = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
_A = torch.Size([1, 14_08, 15_36] )
_A = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_A = torch.Size([1, 1_74] )
_A = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
_A = torch.Size([1, 14_08, 15_36] )
_A = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_A = torch.Size([1, 1_74] )
_A = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , _snake_case , atol=1E-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , _snake_case , atol=1E-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_A = outputs.loss
assert torch.allclose(_snake_case , _snake_case , atol=1E-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_snake_case )
model.save_pretrained(_snake_case )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(_snake_case , organization='nielsr' )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
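# Example invocation (script name illustrative; the flags come from the
# argparse setup above, with --checkpoint_url defaulting to the original
# Google Drive weights):
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "<google-drive-download-link>" \
#       --pytorch_dump_folder_path ./videomae-base \
#       --model_name videomae-base \
#       --push_to_hub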
| 718
|
"""simple docstring"""
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
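
# Non-interactive sketch of the same function with a fixed input:
#   pigeon_sort([0, 5, 3, 2, 2])  # -> [0, 2, 2, 3, 5]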
| 505
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def _UpperCAmelCase ( a : str , a : str , a : Optional[Any] ):
snake_case__ = os.path.abspath(a )
logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
snake_case__ = tf.train.list_variables(a )
snake_case__ = []
snake_case__ = []
snake_case__ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
snake_case__ = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(F'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
snake_case__ = name[1:]
# figure out how many levels deep the name is
snake_case__ = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(a )
# read data
snake_case__ = tf.train.load_variable(a , a )
names.append("""/""".join(a ) )
arrays.append(a )
logger.info(F'''Read a total of {len(a ):,} layers''' )
# Sanity check
if len(set(a ) ) != 1:
raise ValueError(F'''Found layer names with different depths (layer depth {list(set(a ) )})''' )
snake_case__ = list(set(a ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(a , a ):
snake_case__ = full_name.split("""/""" )
snake_case__ = model
snake_case__ = []
for i, m_name in enumerate(a ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
snake_case__ = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
snake_case__ = getattr(a , """embeddings""" )
snake_case__ = getattr(a , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
snake_case__ = getattr(a , """encoder""" )
snake_case__ = getattr(a , """layer""" )
snake_case__ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
snake_case__ = getattr(a , """pooler""" )
snake_case__ = getattr(a , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
snake_case__ = getattr(a , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
snake_case__ = getattr(a , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
snake_case__ = getattr(a , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
snake_case__ = getattr(a , """token_type_embeddings""" )
else:
raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
snake_case__ = getattr(a , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
snake_case__ = getattr(a , """attention""" )
snake_case__ = getattr(a , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
snake_case__ = getattr(a , """attention""" )
snake_case__ = getattr(a , """output""" )
snake_case__ = getattr(a , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
snake_case__ = getattr(a , """attention""" )
snake_case__ = getattr(a , """output""" )
snake_case__ = getattr(a , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
snake_case__ = getattr(a , """output""" )
snake_case__ = getattr(a , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
snake_case__ = getattr(a , """output""" )
snake_case__ = getattr(a , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
snake_case__ = getattr(a , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
snake_case__ = getattr(a , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
snake_case__ = getattr(a , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
snake_case__ = getattr(a , """intermediate""" )
snake_case__ = getattr(a , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
snake_case__ = getattr(a , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
snake_case__ = getattr(a , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
snake_case__ = getattr(a , """weight""" )
else:
logger.warning(F'''Ignored {m_name}''' )
# for certain layers reshape is necessary
snake_case__ = """.""".join(a )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , a ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""" , a ):
snake_case__ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
snake_case__ = array.transpose()
if pointer.shape == array.shape:
snake_case__ = torch.from_numpy(a )
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def _UpperCAmelCase ( a : Optional[int] , a : List[str] , a : Optional[int] ):
# Instantiate model
logger.info(F'''Loading model based on config from {config_path}...''' )
snake_case__ = BertConfig.from_json_file(a )
snake_case__ = BertModel(a )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(a , a , a )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
a__ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
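# Example invocation (script name and paths illustrative; flags as defined in
# the argparse setup above):
#   python convert_bert_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_model/ckpt-1 \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin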
| 654
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def _UpperCAmelCase ( a : List[str] , a : Any=False ):
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _UpperCAmelCase ( a : int , a : List[Any] , a : Union[str, Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = """"""
else:
snake_case__ = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _UpperCAmelCase ( a : List[str] , a : Tuple ):
snake_case__ = DeiTConfig()
# all deit models have fine-tuned heads
snake_case__ = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
snake_case__ = 1000
snake_case__ = """huggingface/label-files"""
snake_case__ = """imagenet-1k-id2label.json"""
snake_case__ = json.load(open(hf_hub_download(a , a , repo_type="""dataset""" ) , """r""" ) )
snake_case__ = {int(a ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = int(deit_name[-6:-4] )
snake_case__ = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
snake_case__ = 192
snake_case__ = 768
snake_case__ = 12
snake_case__ = 3
elif deit_name[9:].startswith("""small""" ):
snake_case__ = 384
snake_case__ = 1536
snake_case__ = 12
snake_case__ = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
snake_case__ = 1024
snake_case__ = 4096
snake_case__ = 24
snake_case__ = 16
# load original model from timm
snake_case__ = timm.create_model(a , pretrained=a )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ = timm_model.state_dict()
snake_case__ = create_rename_keys(a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a , a )
# load HuggingFace model
snake_case__ = DeiTForImageClassificationWithTeacher(a ).eval()
model.load_state_dict(a )
# Check outputs on an image, prepared by DeiTImageProcessor
snake_case__ = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
snake_case__ = DeiTImageProcessor(size=a , crop_size=config.image_size )
snake_case__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
snake_case__ = encoding["""pixel_values"""]
snake_case__ = model(a )
snake_case__ = timm_model(a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a , outputs.logits , atol=1e-3 )
Path(a ).mkdir(exist_ok=a )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a__ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
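# Example invocation (script name illustrative; flags as defined above):
#   python convert_deit_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224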
| 654
| 1
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCAmelCase = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"
def __init__( self , snake_case ):
if type(snake_case ) == dict:
lowercase = Namespace(**snake_case )
lowercase = import_module('tasks' )
try:
lowercase = getattr(snake_case , hparams.task_type )
lowercase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase = self.token_classification_task.get_labels(hparams.labels )
lowercase = CrossEntropyLoss().ignore_index
super().__init__(snake_case , len(self.labels ) , self.mode )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
return self.model(**snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != "distilbert":
lowercase = (
batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase = self(**snake_case )
lowercase = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.hparams
for mode in ["train", "dev", "test"]:
lowercase = self._feature_file(snake_case )
if os.path.exists(snake_case ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , snake_case )
lowercase = torch.load(snake_case )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
lowercase = self.token_classification_task.read_examples_from_file(args.data_dir , snake_case )
lowercase = self.token_classification_task.convert_examples_to_features(
snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet'] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=snake_case , pad_on_left=bool(self.config.model_type in ['xlnet'] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('Saving features into cached file %s' , snake_case )
torch.save(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = False ):
lowercase = self._feature_file(snake_case )
logger.info('Loading features from cached file %s' , snake_case )
lowercase = torch.load(snake_case )
lowercase = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowercase = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(snake_case , snake_case , snake_case , snake_case ) , batch_size=snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
"""Compute validation""" ""
lowercase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != "distilbert":
lowercase = (
batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase = self(**snake_case )
lowercase , lowercase = outputs[:2]
lowercase = logits.detach().cpu().numpy()
lowercase = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        """Evaluation called for both Val and Test"""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)

        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# when stable
lowercase , lowercase , lowercase = self._eval_end(snake_case )
lowercase = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# updating to test_epoch_end instead of deprecated test_end
lowercase , lowercase , lowercase = self._eval_end(snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case , snake_case ):
# Add NER specific options
BaseTransformer.add_model_specific_args(snake_case , snake_case )
parser.add_argument(
'--task_type' , default='NER' , type=snake_case , help='Task type to fine tune in training (e.g. NER, POS, etc)' )
parser.add_argument(
'--max_seq_length' , default=128 , type=snake_case , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--labels' , default='' , type=snake_case , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' , )
parser.add_argument(
'--gpus' , default=0 , type=snake_case , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCAmelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = NERTransformer(args)
UpperCAmelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCAmelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCAmelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
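# Example invocation (a sketch; --task_type/--max_seq_length/--labels/--gpus are
# defined above, the remaining flags are assumed to come from
# add_generic_args/BaseTransformer in lightning_base):
#   python run_ner.py --data_dir ./conll2003 --model_name_or_path bert-base-cased \
#       --output_dir ./ner-model --task_type NER --max_seq_length 128 --gpus 1 --do_train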
| 565
|
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
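
# Quick sanity-check sketch for gen_gaussian_kernel (needs only numpy, no image
# files or OpenCV); the expected properties follow from the mgrid construction:
#
#   kernel = gen_gaussian_kernel(3, sigma=1)
#   assert kernel.shape == (3, 3)
#   assert kernel[1, 1] == kernel.max()  # peak at the center, where x = y = 0
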
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sort sequence[start:end + 1] in place with the deliberately inefficient
    "multiply and surrender" slowsort recursion.

    >>> seq = [1, 6, 2, 5, 3, 4]
    >>> slowsort(seq)
    >>> seq
    [1, 2, 3, 4, 5, 6]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
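
# Usage sketch: slowsort sorts the list in place and returns None.
#
#   data = [163, 3, 255, 0, 22]
#   slowsort(data)
#   print(data)  # [0, 3, 22, 163, 255]
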
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    """Image processor with LeViT-style resize / center-crop / rescale / normalize preprocessing."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image; a shortest_edge size is first scaled by 256 / 224."""
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by the given scale factor."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess an image or batch of images into model-ready pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
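
# Usage sketch (hedged: the class name LevitImageProcessor is reconstructed from
# the (256 / 224) shortest-edge rule above and may not match the original file):
#
#   import numpy as np
#   processor = LevitImageProcessor()
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop
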
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
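
# Usage sketch: every argument above is a plain keyword with a default, so a
# config can be built and selectively overridden without touching the rest.
#
#   config = RealmConfig(num_candidates=4)
#   config.model_type          # "realm"
#   config.num_candidates      # 4
#   config.searcher_beam_size  # 5000 (kept at its default)
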
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
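
# Note on the pattern above: at runtime the module is swapped for a _LazyModule,
# so the heavy modeling file (and torch) is only imported on first attribute
# access, while the TYPE_CHECKING branch gives static type checkers the real
# symbols. A usage sketch:
#
#   from transformers.models.trocr import TrOCRProcessor  # resolved lazily
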
"""simple docstring"""
import math
from datetime import datetime, timedelta
def _snake_case ( _snake_case : int ) -> datetime:
'''simple docstring'''
_A = year % 19
_A = year % 4
_A = year % 7
_A = math.floor(year / 1_00 )
_A = math.floor((13 + 8 * leap_day_inhibits) / 25 )
_A = leap_day_inhibits / 4
_A = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
_A = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_A = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
_A = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(_snake_case , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(_snake_case , 4 , 18 )
else:
return datetime(_snake_case , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
a = '''will be''' if year > datetime.now().year else '''was'''
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
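
# Usage sketch: the function returns a datetime, so results compare and format
# naturally (Western Easter 2023 fell on April 9):
#
#   easter_2023 = gauss_easter(2023)
#   easter_2023.strftime("%Y-%m-%d")  # "2023-04-09"
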
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
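
# Mapping note: "in_layers.0/.2" and "out_layers.0/.3" are the first and second
# GroupNorm/Conv pairs of the original block, "emb_layers.1" is the timestep-
# embedding projection, and "skip_connection" only exists when the channel count
# changes -- which is why callers pass has_skip for the first resnet of a block.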
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
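
# Shape note (a sketch of the intent): the original checkpoint stores qkv as one
# fused 1x1-conv weight of shape (3*C, C, 1, 1); chunk(3, dim=0) splits it into
# q/k/v, and the two squeeze(-1) calls drop the trailing conv dims to (C, C) so
# the tensors load into diffusers' linear attention projections.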
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
snake_case_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
snake_case_ : Any = parser.parse_args()
snake_case_ : Optional[Any] = strabool(args.class_cond)
snake_case_ : Any = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
snake_case_ : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
snake_case_ : str = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
snake_case_ : List[Any] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
snake_case_ : List[str] = None
snake_case_ : str = con_pt_to_diffuser(args.unet_path, unet_config)
snake_case_ : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
snake_case_ : Union[str, Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
snake_case_ : List[Any] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
snake_case_ : List[str] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
snake_case_ : Optional[int] = CMStochasticIterativeScheduler(**scheduler_config)
snake_case_ : Optional[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
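
# Invocation sketch (the script and checkpoint file names are illustrative
# assumptions; the "imagenet64"/"256"/"test" and "cd"/"ct" substrings in the
# checkpoint name drive the config selection above):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt --dump_path ./cd_imagenet64_l2 --class_cond True
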
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints complete words as soon as they are formed."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout; if the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Puts the new text in the queue; if the stream is ending, also puts a stop signal."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
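
# Usage sketch for TextIteratorStreamer: run generate() in a worker thread and
# consume text on the main thread (model/tokenizer loading is elided and assumed
# to exist in the caller's scope):
#
#   from threading import Thread
#
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer))
#   thread.start()
#   for new_text in streamer:
#       print(new_text, end="")
#   thread.join()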