| code (string, lengths 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = {}
def lowerCamelCase__ ( self :int , lowerCamelCase_ :str ) -> None:
"""simple docstring"""
UpperCamelCase__ = {}
def lowerCamelCase__ ( self :Any , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :float ) -> None:
"""simple docstring"""
if nodea not in self.connections:
self.add_node(lowerCamelCase_ )
if nodea not in self.connections:
self.add_node(lowerCamelCase_ )
UpperCamelCase__ = probability
def lowerCamelCase__ ( self :Optional[Any] ) -> list[str]:
"""simple docstring"""
return list(self.connections )
def lowerCamelCase__ ( self :Union[str, Any] , lowerCamelCase_ :str ) -> str:
"""simple docstring"""
UpperCamelCase__ = 0
UpperCamelCase__ = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def snake_case__ ( _snake_case : str , _snake_case : list[tuple[str, str, float]] , _snake_case : int ):
"""simple docstring"""
UpperCamelCase__ = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(_snake_case , _snake_case , _snake_case )
UpperCamelCase__ = Counter(graph.get_nodes() )
UpperCamelCase__ = start
for _ in range(_snake_case ):
UpperCamelCase__ = graph.transition(_snake_case )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
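A minimal usage sketch for the restored Markov chain above; the transition list and step count are made-up illustration values:

# Python sketch, assuming the names restored above
transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
# count how often each node is visited over 5000 random steps starting from "a"
print(get_transitions("a", transitions, 5000))  # e.g. Counter({'a': ..., 'b': ...})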
| code_codestyle: 516 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : List[Any] = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
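For context, replacing the entry in sys.modules with a `_LazyModule` defers the heavy torch import until an attribute is first touched; a small illustrative sketch (hypothetical session):

# importing the package is cheap: only the import structure is registered
from transformers.models import lilt
# touching an attribute triggers the real import of modeling_lilt (and torch)
model_cls = lilt.LiltModel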
| style_context_codestyle: 516 | label: 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( A_ ):
__a = ['''image_processor''', '''tokenizer''']
__a = '''FlavaImageProcessor'''
__a = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Union[str, Any] , _lowerCamelCase : str=None , _lowerCamelCase : Dict=None , **_lowerCamelCase : Optional[int] ):
_snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _lowerCamelCase , )
_snake_case = kwargs.pop('''feature_extractor''' )
_snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_lowerCamelCase , _lowerCamelCase )
_snake_case = self.image_processor
def __call__( self : List[str] , _lowerCamelCase : Optional[Any] = None , _lowerCamelCase : Any = None , _lowerCamelCase : Tuple = True , _lowerCamelCase : List[str] = False , _lowerCamelCase : int = False , _lowerCamelCase : List[Any] = None , _lowerCamelCase : str = 0 , _lowerCamelCase : int = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Tuple = None , _lowerCamelCase : Tuple = None , _lowerCamelCase : Dict = None , _lowerCamelCase : Optional[int] = False , _lowerCamelCase : List[Any] = False , _lowerCamelCase : str = False , _lowerCamelCase : List[Any] = False , _lowerCamelCase : str = True , _lowerCamelCase : Any = None , **_lowerCamelCase : Dict , ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_snake_case = self.tokenizer(
text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , )
if images is not None:
_snake_case = self.image_processor(
_lowerCamelCase , return_image_mask=_lowerCamelCase , return_codebook_pixels=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , )
if text is not None and images is not None:
encoding.update(_lowerCamelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCamelCase ) , tensor_type=_lowerCamelCase )
def lowercase ( self : str , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Optional[int] ):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def lowercase ( self : Dict , *_lowerCamelCase : Tuple , **_lowerCamelCase : Tuple ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@property
def lowercase ( self : Optional[int] ):
_snake_case = self.tokenizer.model_input_names
_snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase ( self : List[str] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _lowerCamelCase , )
return self.image_processor_class
@property
def lowercase ( self : Any ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _lowerCamelCase , )
return self.image_processor
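A short usage sketch for the processor above; `facebook/flava-full` is the public FLAVA checkpoint, and the image path is a placeholder:

from transformers import FlavaProcessor
from PIL import Image

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
inputs = processor(
    images=Image.open("example.jpg"),  # placeholder path
    text=["a photo of a cat"],
    return_tensors="pt",
    padding=True,
)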
| code_codestyle: 708 |
"""simple docstring"""
import operator
def _UpperCAmelCase ( __lowerCamelCase : list , __lowerCamelCase : bool = False , __lowerCamelCase : list | None = None ) -> list:
_snake_case = operator.lt if reverse else operator.gt
_snake_case = solution or []
if not arr:
return solution
_snake_case = [arr.pop(0 )]
for i, item in enumerate(__lowerCamelCase ):
if _operator(__lowerCamelCase , sublist[-1] ):
sublist.append(__lowerCamelCase )
arr.pop(__lowerCamelCase )
# merging sublist into solution list
if not solution:
solution.extend(__lowerCamelCase )
else:
while sublist:
_snake_case = sublist.pop(0 )
for i, xx in enumerate(__lowerCamelCase ):
if not _operator(__lowerCamelCase , __lowerCamelCase ):
solution.insert(__lowerCamelCase , __lowerCamelCase )
break
else:
solution.append(__lowerCamelCase )
strand_sort(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| style_context_codestyle: 430 | label: 0 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| code_codestyle: 277 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case_ :
"""simple docstring"""
def __init__( self , __a , __a=13 , __a=10 , __a=3 , __a=2 , __a=2 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a="divided_space_time" , __a=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = patch_size
A__ = num_frames
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = attention_type
A__ = initializer_range
A__ = scope
A__ = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
A__ = (image_size // patch_size) ** 2
A__ = (num_frames) * self.num_patches_per_frame + 1
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
A__ = self.num_labels
return config
def _UpperCAmelCase ( self , __a , __a , __a ):
"""simple docstring"""
A__ = TimesformerModel(config=__a )
model.to(__a )
model.eval()
A__ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __a , __a , __a ):
"""simple docstring"""
A__ = TimesformerForVideoClassification(__a )
model.to(__a )
model.eval()
A__ = model(__a )
# verify the logits shape
A__ = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_: str = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_: List[Any] = False
SCREAMING_SNAKE_CASE_: Dict = False
SCREAMING_SNAKE_CASE_: Optional[int] = False
SCREAMING_SNAKE_CASE_: Any = False
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = TimesformerModelTester(self )
A__ = ConfigTester(
self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def _UpperCAmelCase ( self , __a , __a , __a=False ):
"""simple docstring"""
A__ = copy.deepcopy(__a )
if return_labels:
if model_class in get_values(__a ):
A__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
return inputs_dict
def _UpperCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(__a )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__a )
@slow
def _UpperCAmelCase ( self ):
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TimesformerModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = self.model_tester.seq_length
A__ = self.model_tester.num_frames
A__ = True
A__ = False
A__ = True
A__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(__a , __a ) )
A__ = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(__a , __a ) )
A__ = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
A__ = len(__a )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 1 , len(__a ) )
A__ = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _UpperCAmelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(__a , __a , __a ):
A__ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(__a , __a ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__a ) , __a )
A__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(__a , __a , __a )
def __lowerCamelCase ( ):
A__ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' ,filename='eating_spaghetti.npy' ,repo_type='dataset' )
A__ = np.load(lowerCAmelCase__ )
return list(lowerCAmelCase__ )
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
__a )
A__ = self.default_image_processor
A__ = prepare_video()
A__ = image_processor(video[:8] , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
A__ = model(**__a )
# verify the logits
A__ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __a )
A__ = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
| style_context_codestyle: 260 | label: 0 |
'''simple docstring'''
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers, starting from 1."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
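A quick sanity check of `solution` (this is Project Euler problem 25): the first Fibonacci term with three digits is F(12) = 144, so:

assert solution(3) == 12  # 144 is the 12th term
# solution(1000) yields the published Project Euler 25 answer, 4782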
| code_codestyle: 719 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """simple docstring"""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
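A hedged usage sketch for the pipeline above; the checkpoint name is an assumption (an unconditional score-based model commonly paired with this sampler):

from diffusers import KarrasVePipeline

pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
image = pipe(num_inference_steps=50).images[0]
image.save("sample.png")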
| style_context_codestyle: 417 | label: 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
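A minimal usage sketch, using a checkpoint name taken from the vocab map above:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("moussaKam/barthez")  # loads BarthezTokenizerFast
ids = tok("Le camembert est délicieux.")["input_ids"]  # <s> ... </s> per build_inputs_with_special_tokens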
| code_codestyle: 13 |
"""simple docstring"""
__A : Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__A : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__A : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| style_context_codestyle: 499 | label: 0 |
'''simple docstring'''
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
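A quick sanity check against the standard library; safe here because both functions reduce theta to [0, 2*pi) before summing, so 30 terms converge far below the tolerance:

import math

assert abs(maclaurin_sin(1.0) - math.sin(1.0)) < 1e-9
assert abs(maclaurin_cos(1.0) - math.cos(1.0)) < 1e-9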
| code_codestyle: 257 |
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens) -> str:
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
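A usage sketch for the tokenizer above; the checkpoint name comes from the vocab map, and the trailing id follows from build_inputs_with_special_tokens ([SEP] is fairseq id 2):

from transformers import XLMProphetNetTokenizer

tok = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
ids = tok("Hello world").input_ids  # sentencepiece ids followed by the [SEP] id (2)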
| style_context_codestyle: 257 | label: 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
snake_case = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case = Features({"audio": Audio()} )
snake_case = Features({"transcription": Value("string" )} )
snake_case = "audio"
snake_case = "transcription"
def __UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ):
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
lowerCamelCase__ = copy.deepcopy(self )
lowerCamelCase__ = self.input_schema.copy()
lowerCamelCase__ = features[self.audio_column]
lowerCamelCase__ = input_schema
return task_template
@property
def __UpperCAmelCase ( self : Optional[Any] ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
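A small illustrative sketch of how the template remaps custom column names (the column names here are hypothetical):

task = AutomaticSpeechRecognition(audio_column="audio_path", transcription_column="text")
task.column_mapping  # {"audio_path": "audio", "text": "transcription"}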
| code_codestyle: 129 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__magic_name__ = None
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__magic_name__ = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
__magic_name__ = {
"""camembert-base""": 5_12,
}
__magic_name__ = """▁"""
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ["input_ids", "attention_mask"]
snake_case = CamembertTokenizer
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Any="<s>" , SCREAMING_SNAKE_CASE_ : Tuple="</s>" , SCREAMING_SNAKE_CASE_ : Tuple="</s>" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<unk>" , SCREAMING_SNAKE_CASE_ : str="<pad>" , SCREAMING_SNAKE_CASE_ : List[str]="<mask>" , SCREAMING_SNAKE_CASE_ : Tuple=["<s>NOTUSED", "</s>NOTUSED"] , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| style_context_codestyle: 129 | label: 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
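A usage sketch based on the encoder dict above, where id 0 is the pad token and id 1 the eos token:

from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
tok("Summarize this.").input_ids  # ends with the eos id, 1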
| code_codestyle: 72 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
__SCREAMING_SNAKE_CASE : List[str] =[
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
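# Per-task ignore lists additionally drop the prenets/postnets of the modalities
# that the target model does not include.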
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_model, key, value, full_name, weight_type):
    # Walk the attribute path (e.g. "speecht5.encoder.layer_norm") down to the target module.
    hf_pointer = hf_model
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        mapping = MAPPING_S2T
        ignore_keys = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        mapping = MAPPING_T2S
        ignore_keys = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        mapping = MAPPING_S2S
        ignore_keys = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, ignore_keys):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in mapping.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
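# Entry point: build the HF model for the requested task, copy the fairseq
# weights into it, and save (optionally push) the converted checkpoint.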
@torch.no_grad()
def convert_speechta_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 72
| 1
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token) -> None:
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    status = json.loads(stdout)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 612
|
from __future__ import annotations
lowerCamelCase__ = "#"
class lowerCAmelCase__ :
def __init__( self ) -> None:
'''simple docstring'''
_UpperCamelCase = {}
def A_ ( self , a ) -> None:
'''simple docstring'''
_UpperCamelCase = self._trie
for char in text:
if char not in trie:
_UpperCamelCase = {}
_UpperCamelCase = trie[char]
_UpperCamelCase = True
def A_ ( self , a ) -> tuple | list:
'''simple docstring'''
_UpperCamelCase = self._trie
for char in prefix:
if char in trie:
_UpperCamelCase = trie[char]
else:
return []
return self._elements(a )
def A_ ( self , a ) -> tuple:
'''simple docstring'''
_UpperCamelCase = []
for c, v in d.items():
_UpperCamelCase = [""" """] if c == END else [(c + s) for s in self._elements(a )]
result.extend(a )
return tuple(a )
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def main() -> None:
    """Demo: print the stored completions for the prefix "de"."""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 612
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/xglm-564M""": 2_0_4_8,
}
class XGLMTokenizer(PreTrainedTokenizer):
    """SentencePiece tokenizer for XGLM, mirroring the original fairseq vocabulary layout."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 341
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0_2_2_4, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 341
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
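# A node is ordered by f = g (cost from the start) + h (heuristic distance to the goal).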
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"""AStar execution time = {end_time:f} seconds""")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # the original timed only construction; run the search as well
    bd_end_time = time.time() - bd_start_time
    print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 40
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
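# Fast tests build the Shap-E img2img pipeline from tiny dummy components so a
# full denoising pass stays cheap enough to run on CPU in CI.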
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 214
| 0
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
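# Value-guided planning: candidate trajectories are denoised by a UNet while a
# learned value function nudges them toward high-reward plans via its gradient.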
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 76
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 76
| 1
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n    >>> pipe_prior.to(\"cuda\")\n    >>> prompt = \"red cat, 4k photo\"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n    >>> pipe.to(\"cuda\")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ...     ).images\n    >>> image[0].save(\"cat.png\")\n    ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 697
|
from PIL import Image
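# Mean thresholding: every pixel brighter than the image's mean intensity
# becomes white (255), everything else black (0).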
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image around its global mean intensity."""
    height, width = image.size
    mean = 0
    pixels = image.load()

    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 70
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
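# A DisjunctiveConstraint is fulfilled as soon as generation completes any one
# of its candidate token sequences.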
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __UpperCAmelCase ( self : int ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
lowerCamelCase__ = [[1, 2, 4], [1, 2, 3, 4]]
lowerCamelCase__ = DisjunctiveConstraint(SCREAMING_SNAKE_CASE_ )
self.assertTrue(isinstance(dc.token_ids , SCREAMING_SNAKE_CASE_ ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCAmelCase ( self : Union[str, Any] ):
# We can't have constraints that are complete subsets of another. This leads to a preverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
lowerCamelCase__ = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
DisjunctiveConstraint(SCREAMING_SNAKE_CASE_ ) # fails here
def __UpperCAmelCase ( self : str ):
lowerCamelCase__ = [[1, 2, 3], [1, 2, 4]]
lowerCamelCase__ = DisjunctiveConstraint(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = dc.update(1 )
lowerCamelCase__ = stepped is True and completed is False and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = dc.update(2 )
lowerCamelCase__ = stepped is True and completed is False and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = dc.update(3 )
lowerCamelCase__ = stepped is True and completed is True and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
    def test_example_progression_unequal_three_mid_and_reset( self : Union[str, Any] ):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )

        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
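# Usage sketch (our addition, not part of the test file): how generation code typically
# drives a DisjunctiveConstraint, feeding one sampled token id at a time. Token ids here
# are made up for illustration.
def _demo_disjunctive_walk():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped , completed , reset = dc.update(token)
    assert completed and dc.current_seq == [1, 2, 4]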
| 258
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config( checkpoint_url ):
    """simple docstring"""
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = """pixelshuffle_aux"""
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = """pixelshuffledirect"""
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = """nearest+conv"""
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 2_55.0
        config.upsampler = """"""

    return config
def rename_key( name , config ):
    """simple docstring"""
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
    if "layers" in name:
        name = name.replace("""layers""" , """encoder.stages""" )
    if "residual_group.blocks" in name:
        name = name.replace("""residual_group.blocks""" , """layers""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "q_bias" in name:
        name = name.replace("""q_bias""" , """query.bias""" )
    if "k_bias" in name:
        name = name.replace("""k_bias""" , """key.bias""" )
    if "v_bias" in name:
        name = name.replace("""v_bias""" , """value.bias""" )
    if "cpb_mlp" in name:
        name = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )

    if name == "norm.weight":
        name = """layernorm.weight"""
    if name == "norm.bias":
        name = """layernorm.bias"""

    if "conv_first" in name:
        name = name.replace("""conv_first""" , """first_convolution""" )

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("""conv_last""" , """final_convolution""" )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
            if "upsample.0" in name:
                name = name.replace("""upsample.0""" , """upsample.convolution_0""" )
            if "upsample.2" in name:
                name = name.replace("""upsample.2""" , """upsample.convolution_1""" )
            name = """upsample.""" + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
            name = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
        else:
            pass
    else:
        name = """swin2sr.""" + name

    return name
def convert_state_dict( orig_state_dict , config ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"""] = val[:dim, :]
                orig_state_dict[f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"""] = val[:dim]
                orig_state_dict[f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val

    return orig_state_dict
def convert_swin2sr_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    """simple docstring"""
    config = get_config(checkpoint_url )
    model = Swin2SRForImageSuperResolution(config )
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )

    if len(missing_keys ) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"""Unexpected key {key} in state_dict""" )

    # verify values
    url = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if """Jpeg""" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )

    outputs = model(pixel_values )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCamelCase__ = torch.Size([1, 3, 512, 512] )
lowerCamelCase__ = torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCamelCase__ = torch.Size([1, 3, 1024, 1024] )
lowerCamelCase__ = torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCamelCase__ = torch.Size([1, 3, 1024, 1024] )
lowerCamelCase__ = torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCamelCase__ = torch.Size([1, 3, 512, 512] )
lowerCamelCase__ = torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCamelCase__ = torch.Size([1, 3, 1024, 1024] )
lowerCamelCase__ = torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , __lowercase , atol=1e-3 )
print("""Looks ok!""" )
    url_to_name = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
    model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__lowercase )
if push_to_hub:
model.push_to_hub(f"""caidas/{model_name}""" )
processor.push_to_hub(f"""caidas/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
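# Post-conversion sanity check (our sketch, not part of the original script): run the
# converted checkpoint on one image and return the upscaled result. The repo id matches
# the `caidas/{model_name}` push above; swap in a local folder if you converted offline.
def _demo_super_resolution(repo_or_path="caidas/swin2SR-classical-sr-x2-64"):
    import numpy as np
    model = Swin2SRForImageSuperResolution.from_pretrained(repo_or_path)
    processor = Swin2SRImageProcessor()
    image = Image.open(requests.get(
        "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true", stream=True
    ).raw).convert("RGB")
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        output = model(**inputs).reconstruction  # (1, 3, 2H, 2W), values in [0, 1]
    array = (output.squeeze(0).clamp(0, 1).permute(1, 2, 0).numpy() * 255).round().astype(np.uint8)
    return Image.fromarray(array)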
| 258
| 1
|
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ) -> List[Any]:
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('''/''' )
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}" )
    model = torch.load(os.path.join(model_name_or_path , '''pytorch_model.bin''' ) )
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}" )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}" )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}" )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l , r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}" )
            else:
                raise ValueError('''Unknown pruning method''' )

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f"bertarized_{os.path.basename(model_name_or_path )}" )

    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f"\nCreated folder {target_model_path}" )

    torch.save(pruned_model , os.path.join(target_model_path , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
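# Conceptual illustration (ours; plain PyTorch, independent of the emmental binarizers
# imported above): "topK" binarization keeps the highest-scoring fraction of weights
# and zeroes the rest. Helper name and keep_ratio are ours.
def _demo_topk_mask(scores , keep_ratio=0.5 ):
    k = max(1 , int(scores.numel() * keep_ratio ) )
    kth_largest = scores.flatten().kthvalue(scores.numel() - k + 1 ).values
    return (scores >= kth_largest).to(scores.dtype )
# e.g. _demo_topk_mask(torch.tensor([0.1, 0.9, 0.4, 0.7])) -> tensor([0., 1., 0., 1.])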
| 355
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "markuplm"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=2_56 , max_xpath_subs_unit_embeddings=10_24 , tag_pad_id=2_16 , subs_pad_id=10_01 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
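# Usage sketch (our addition, not in the source): the xpath-related fields are what
# distinguish this config from a vanilla BERT config; overrides pass straight through.
_demo_config = MarkupLMConfig(max_depth=32)
assert _demo_config.max_depth == 32 and _demo_config.tag_pad_id == 2_16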
| 355
| 1
|
"""simple docstring"""
a_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input("Enter message: " )
    key = input("Enter key [alphanumeric]: " )
    mode = input("Encrypt/Decrypt [e/d]: " )

    if mode.lower().startswith("e" ):
        mode = "encrypt"
        translated = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        mode = "decrypt"
        translated = decrypt_message(key , message )

    print(f"""\n{mode.title()}ed message:""" )
    print(translated )


def encrypt_message(key , message ) -> str:
    return translate_message(key , message , "encrypt" )


def decrypt_message(key , message ) -> str:
    return translate_message(key , message , "decrypt" )


def translate_message(key , message , mode ) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )

            num %= len(LETTERS )

            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )

            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
if __name__ == "__main__":
main()
| 621
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict ) -> Optional[int]:
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )


def make_linear_from_emb(emb ) -> List[Any]:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False ) -> List[str]:
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
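# Quick check (our addition): make_linear_from_emb ties the LM head to the shared
# embedding matrix, so logits are dot products against the embedding rows.
def _demo_tied_head():
    emb = nn.Embedding(10 , 4 )
    head = make_linear_from_emb(emb )
    x = torch.randn(1 , 4 )
    assert torch.allclose(head(x ) , x @ emb.weight.T )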
| 621
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config , base_model=False )-> int:
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False )-> List[str]:
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict )-> Any:
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict )-> Any:
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new )-> List[str]:
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path )-> Optional[int]:
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config )

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size )

    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )

    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )

    model.load_state_dict(state_dict )
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors='pt' )

    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
    else:
        expected_slice = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1E-4 )

    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )

    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
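# Downstream usage sketch (our addition): after conversion the checkpoint behaves like
# any ViT backbone; one forward pass yields the CLS embedding for an image. `model_dir`
# is whatever folder the script above saved to.
def _demo_features(model_dir ):
    model = ViTMSNModel.from_pretrained(model_dir )
    processor = ViTImageProcessor.from_pretrained(model_dir )
    image = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=True ).raw )
    inputs = processor(images=image , return_tensors='pt' )
    with torch.no_grad():
        return model(**inputs ).last_hidden_state[:, 0]  # shape (1, hidden_size)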
| 360
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
SCREAMING_SNAKE_CASE__ = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
SCREAMING_SNAKE_CASE__ = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    def _info( self : List[str]):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def _download_and_prepare( self : Any , dl_manager : Any):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name))
    def _compute( self : Union[str, Any] , predictions : List[str] , references : Union[str, Any]):
        scores = self.scorer.score(references=references , candidates=predictions)
        return {"scores": scores}
| 631
| 0
|
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int] ) -> list[int]:  # This function is recursive
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
        # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
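# Worked examples (our addition), traced by hand against the recursion above:
assert longest_subsequence([1, 2, 3]) == [1, 2, 3]
assert longest_subsequence([1, 3, 2, 4]) == [1, 2, 4]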
| 719
|
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no , data_set="train" ) -> int:
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )


def _hypothesis_value(data_input_tuple ) -> Optional[Any]:
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no , data_set ) -> Optional[Any]:
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no , data_set ) -> Dict:
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None


def summation_of_cost_derivative(index , end=m ) -> Optional[Any]:
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index ) -> Optional[int]:
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value


def run_gradient_descent() -> Tuple:
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.00_00_02
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("""Number of iterations:""", j) )


def test_gradient_descent() -> Dict:
    for i in range(len(test_data ) ):
        print(("""Actual output value:""", output(i , """test""" )) )
        print(("""Hypothesis output:""", calculate_hypothesis_value(i , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
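# One-step illustration (our addition, not in the source): the update rule iterated by
# run_gradient_descent() is  p_i <- p_i - LEARNING_RATE * (1/m) * sum_j error(j) * x_j[i],
# with the bias term p_0 using a constant input of 1. Helper name and return are ours.
def _demo_single_bias_update(lr=LEARNING_RATE):
    params = list(parameter_vector)
    errors = [
        (params[0] + sum(p * x for p, x in zip(params[1:], xs))) - y
        for xs, y in train_data
    ]
    return params[0] - lr * sum(errors) / m  # updated bias term p_0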
| 466
| 0
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = '''allenai'''
def rewrite_dict_keys(d: Tuple ) -> Optional[Any]:
    '''simple docstring'''
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(R"@@$" , "" , k ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , k ), v) for k, v in d.items() )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[F"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path , pytorch_dump_folder_path ) -> List[str]:
    '''simple docstring'''
    # prep
    assert os.path.exists(fsmt_checkpoint_path )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(F"Writing results to {pytorch_dump_folder_path}" )

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path )
    fsmt_folder_path = dirname(fsmt_checkpoint_path )

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(F"using checkpoint {checkpoint_file}" )
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path , checkpoint_file , data_name_or_path , archive_map=models , **kwargs )

    args = vars(chkpt["args"]["model"] )

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path )
    model_dir = basename(pytorch_dump_folder_path )

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path , F"dict.{src_lang}.txt" )
    tgt_dict_file = os.path.join(fsmt_folder_path , F"dict.{tgt_lang}.txt" )

    src_dict = Dictionary.load(src_dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , "vocab-src.json" )
    print(F"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records" )
    with open(src_vocab_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file )
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices )
    tgt_vocab_size = len(tgt_vocab )
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path , "vocab-tgt.json" )
    print(F"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records" )
    with open(tgt_vocab_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(tgt_vocab , ensure_ascii=False , indent=json_indent ) )

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["merges_file"] )
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path , fn )
        if os.path.exists(fsmt_merges_file ):
            break
    with open(fsmt_merges_file , encoding="utf-8" ) as fin:
        merges = fin.read()
    merges = re.sub(R" \d+$" , "" , merges , 0 , re.M )  # remove frequency number
    print(F"Generating {merges_file}" )
    with open(merges_file , "w" , encoding="utf-8" ) as fout:
        fout.write(merges )

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path , "config.json" )

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", F"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", F"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(F"Generating {fsmt_model_config_file}" )
    with open(fsmt_model_config_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1_0_2_4,
        "do_lower_case": do_lower_case,
    }

    print(F"Generating {fsmt_tokenizer_config_file}" )
    with open(fsmt_tokenizer_config_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = FSMTForConditionalGeneration(config )

    # check that it loads ok
    model_new.load_state_dict(model_state_dict , strict=False )

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(F"Generating {pytorch_weights_dump_path}" )
    torch.save(model_state_dict , pytorch_weights_dump_path )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F"cd {data_root}" )
print(F"transformers-cli upload {model_dir}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 642
|
def logical_left_shift(number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers" )

    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers" )

    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int , shift_amount: int ) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number ) ).strip("-" )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number )) + binary_number
        )

    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
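# Worked examples (our addition): 1 << 2, a logical 8 >> 2, and -8 >> 2 in 5-bit
# two's complement (the sign bit is replicated on the left).
assert logical_left_shift(1, 2) == "0b100"
assert logical_right_shift(8, 2) == "0b10"
assert arithmetic_right_shift(-8, 2) == "0b11110"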
| 137
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor( ProcessorMixin ):
    feature_extractor_class = """ClapFeatureExtractor"""
    tokenizer_class = ("""RobertaTokenizer""", """RobertaTokenizerFast""")

    def __init__(self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )

    def __call__(self , text=None , audios=None , return_tensors=None , **kwargs ):
        sampling_rate = kwargs.pop("sampling_rate" , None )

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none." )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 415
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig( PretrainedConfig ):
    model_type = """deta"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__(self , backbone_config=None , num_queries=900 , max_position_embeddings=2048 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=300 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , **kwargs , ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads(self ) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self ) -> int:
        return self.d_model

    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 415
| 1
|
def join(separator: str , separated: list ) -> str:
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
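# Worked examples (our addition), mirroring str.join semantics:
assert join("" , ["a", "b", "c"] ) == "abc"
assert join(" " , ["You", "are", "amazing!"] ) == "You are amazing!"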
| 248
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance( image_embeds , text_embeds ):
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
class StableDiffusionSafetyChecker( PreTrainedModel ):
    config_class = CLIPConfig
    _no_split_modules = ["""CLIPEncoderLayer"""]
    def __init__( self , config : CLIPConfig ):
        '''simple docstring'''
        super().__init__(config )
        self.vision_model = CLIPVisionModel(config.vision_config )
        self.visual_projection = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=False )
        self.concept_embeds = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=False )
        self.special_care_embeds = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=False )
        self.concept_embeds_weights = nn.Parameter(torch.ones(17 ) , requires_grad=False )
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3 ) , requires_grad=False )
@torch.no_grad()
    def forward( self , clip_input , images ):
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds ).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds , self.concept_embeds ).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size ):
            result_img = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx )
            result.append(result_img )
        has_nsfw_concepts = [len(res['''bad_concepts'''] ) > 0 for res in result]
        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx( self , clip_input : torch.FloatTensor , images : torch.FloatTensor ):
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0 , dim=1 )
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
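# Geometry note (our addition): despite its name, cosine_distance() returns cosine
# *similarity* of L2-normalized embeddings; an image is flagged when similarity minus
# a per-concept threshold is positive. Tiny 2-D example with made-up numbers:
def _demo_flagging(threshold=0.5 ):
    image_embeds = torch.tensor([[1.0, 0.0]] )
    concept_embeds = torch.tensor([[1.0, 0.0], [0.0, 1.0]] )
    sims = cosine_distance(image_embeds , concept_embeds )  # tensor([[1., 0.]])
    return torch.any(sims - threshold > 0 , dim=1 )         # tensor([True])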
| 248
| 1
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__A =logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=str , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=str , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=str , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=str , default="data/dump" , help="The dump file prefix." )
args = parser.parse_args()
logger.info(F'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
bos = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
sep = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
bos = tokenizer.special_tokens_map["cls_token"] # `<s>`
sep = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
bos = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
sep = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F'Loading text from {args.file_path}' )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
data = fp.readlines()
logger.info("Start encoding" )
logger.info(F'{len(data )} examples to process.' )
rslt = []
iter = 0
interval = 10000
start = time.time()
for text in data:
text = F'{bos} {text.strip()} {sep}'
token_ids = tokenizer.encode(text , add_special_tokens=False )
rslt.append(token_ids )
iter += 1
if iter % interval == 0:
end = time.time()
logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
start = time.time()
logger.info("Finished binarization" )
logger.info(F'{len(data )} examples processed.' )
dp_file = F'{args.dump_file}.{args.tokenizer_name}.pickle'
vocab_size = tokenizer.vocab_size
if vocab_size < (1 << 16):
rslt_ = [np.uint16(d ) for d in rslt]
else:
rslt_ = [np.int32(d ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'Dump to {dp_file}' )
with open(dp_file , "wb" ) as handle:
pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
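# Round-trip sketch (illustrative, not part of the original script): the pickle
# written above can be reloaded for training with the dtype it was saved with.
# with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
#     sequences = pickle.load(handle)  # list of np.uint16 / np.int32 arrays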
| 313
|
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude : float , angle : float , radian_mode : bool = False ):
if radian_mode:
return [magnitude * cos(angle ), magnitude * sin(angle )]
return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces : NDArray[float64] , location : NDArray[float64] , eps : float = 10**-1 ):
moments = cross(location , forces )
sum_moments = sum(moments )
return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
__A =array(
[
polar_force(718.4, 1_8_0 - 3_0),
polar_force(879.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
__A =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__A =array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
__A =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__A =array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
__A =array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
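# Worked example (hand-checked): polar_force(10, 90) decomposes a 10 N force at
# 90 degrees into [10 * cos(90°), 10 * sin(90°)] ~= [0.0, 10.0], i.e. a purely
# vertical force, matching the [x, y] component convention used above.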
| 313
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix( postfix_notation : list ) -> int:
if not postfix_notation:
return 0
operations = {"""+""", """-""", """*""", """/"""}
stack = []
for token in postfix_notation:
if token in operations:
b , a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(token ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
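# Worked example (hand-checked against the stack algorithm above):
# evaluate_postfix(["2", "1", "+", "3", "*"]) pushes 2 and 1, replaces them
# with 3 on "+", pushes 3, and returns 9 on "*".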
| 451
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """spiece.model"""}
__snake_case = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
__snake_case = {
"""AI-Sweden/gpt-sw3-126m""": 2048,
"""AI-Sweden/gpt-sw3-350m""": 2048,
"""AI-Sweden/gpt-sw3-1.6b""": 2048,
"""AI-Sweden/gpt-sw3-6.7b""": 2048,
"""AI-Sweden/gpt-sw3-20b""": 2048,
}
class _a ( PreTrainedTokenizer ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
def __init__( self : str , vocab_file : str , do_lower_case : bool=False , remove_space : bool=False , keep_accents : bool=False , pad_token : str=None , unk_token : str=None , eos_token : str=None , bos_token : str=None , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Tuple , ):
'''simple docstring'''
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
name_or_path = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" if you are testing the model, this can safely be ignored""" )
name_or_path = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
eos_token = """<|endoftext|>""" if eos_token is None else eos_token
unk_token = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
pad_token = unk_token if pad_token is None else pad_token
bos_token = eos_token if bos_token is None else bos_token
else:
pad_token = """<pad>""" if pad_token is None else pad_token
bos_token = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(vocab_file )
# Used for whitespace normalization in input texts
# fmt: off
lowercase_ = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
self.non_printing_characters_re = re.compile(
F"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self : Tuple ):
'''simple docstring'''
state = self.__dict__.copy()
state["""sp_model"""] = None
return state
def __setstate__( self : Tuple , d : Union[str, Any] ):
'''simple docstring'''
self.__dict__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def vocab_size( self : int ):
'''simple docstring'''
return len(self.sp_model )
def preprocess_text( self : List[Any] , text : str ):
'''simple docstring'''
text = self.non_printing_characters_re.sub("""""" , text )
# Normalize whitespaces
text = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
text = unicodedata.normalize("""NFC""" , text )
return text
def lowerCamelCase__ ( self : Any , text : str , **kwargs : Any ):
'''simple docstring'''
text = self.preprocess_text(text )
return self.sp_model.encode(text , out_type=str )
def lowerCamelCase__ ( self : Dict , lowercase_ : str ):
'''simple docstring'''
return self.sp_model.PieceToId(lowercase_ )
def lowerCamelCase__ ( self : Any , lowercase_ : int ):
'''simple docstring'''
return self.sp_model.IdToPiece(lowercase_ )
@staticmethod
def lowerCamelCase__ ( out_string : str ):
'''simple docstring'''
return out_string
def lowerCamelCase__ ( self : Dict , tokens : List[str] ):
'''simple docstring'''
current_sub_tokens = []
out_string = """"""
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token )
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(save_directory ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , """wb""" ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
def lowerCamelCase__ ( self : Any , text : Union[str, List[str]] , return_tensors : Union[str, bool] = False ):
'''simple docstring'''
if isinstance(text , str ):
text = self.preprocess_text(text )
token_ids = self.sp_model.encode(text )
else:
text = [self.preprocess_text(t ) for t in text]
token_ids = self.sp_model.encode(text )
if return_tensors is True or return_tensors == "pt":
token_ids = torch.tensor(token_ids )
return token_ids
def lowerCamelCase__ ( self : Union[str, Any] , lowercase_ : Union[int, List[int]] ):
'''simple docstring'''
return self.sp_model.decode(lowercase_ )
def lowerCamelCase__ ( self : Any , conversation : "Conversation" ):
'''simple docstring'''
all_responses = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
prompt = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(all_responses ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=prompt )
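# Prompt-format sketch (conversation contents are illustrative): for one user
# turn "Hej" and one bot turn "Hej hej", the template above renders roughly
# "<|endoftext|><s>User: Hej<s>Bot: Hej hej<s>Bot:" before being encoded.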
| 451
| 1
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str , config_file : str , pytorch_dump_path : str ):
# Initialise PyTorch model
config = LxmertConfig.from_json_file(config_file )
print(F"""Building PyTorch model from configuration: {config}""" )
model = LxmertForPreTraining(config )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
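# Example invocation (script name and paths are placeholders):
# python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./model.ckpt \
#     --config_file ./lxmert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin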
| 718
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str , config_file : str , pytorch_dump_path : str ):
# Initialise PyTorch model
config = TaConfig.from_json_file(config_file )
print(F"""Building PyTorch model from configuration: {config}""" )
model = TaForConditionalGeneration(config )
# Load weights from tf checkpoint
load_tf_weights_in_ta(model , config , tf_checkpoint_path )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 679
| 0
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __magic_name__ ( TaskTemplate ):
"""simple docstring"""
task: str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
input_schema: ClassVar[Features] = Features({'audio': Audio()} )
label_schema: ClassVar[Features] = Features({'transcription': Value('string' )} )
audio_column: str = "audio"
transcription_column: str = "transcription"
def align_with_features( self , features ):
'''simple docstring'''
if self.audio_column not in features:
raise ValueError(f"Column {self.audio_column} is not present in features." )
if not isinstance(features[self.audio_column] , Audio ):
raise ValueError(f"Column {self.audio_column} is not an Audio type." )
task_template = copy.deepcopy(self )
input_schema = self.input_schema.copy()
input_schema["audio"] = features[self.audio_column]
task_template.__dict__["input_schema"] = input_schema
return task_template
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {self.audio_column: "audio", self.transcription_column: "transcription"}
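# Usage sketch (dataset is a placeholder): `task.align_with_features(ds.features)`
# returns a copy of the template whose "audio" input schema was replaced by the
# dataset's own Audio feature (e.g. carrying its sampling rate).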
| 111
|
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
SAMPLE_BPE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
FRAMEWORK = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
tokenizer_class = CamembertTokenizer
rust_tokenizer_class = CamembertTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
token = "<pad>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(vocab_keys ) , 1004 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
sequence = "I was born in 92000, and this is falsé."
ids = tokenizer.encode(sequence )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
ids = tokenizer.encode(sequence , add_special_tokens=False )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
# <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
tokens = tokenizer.convert_ids_to_tokens(ids )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
ids = tokenizer.encode(sequence , add_special_tokens=False )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Any = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=sequences , )
| 111
| 1
|
from ..utils import DummyObject, requires_backends
class __lowercase ( metaclass=DummyObject ):
_backends = ["""note_seq"""]
def __init__( self , *lowercase_ , **lowercase_) -> Any:
requires_backends(self , ['note_seq'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> int:
requires_backends(cls , ['note_seq'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(cls , ['note_seq'])
| 716
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676
| 0
|
from jiwer import compute_measures
import datasets
A : Dict = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
A : Optional[int] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
A : List[Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def _compute( self , predictions=None , references=None , concatenate_texts=False ):
if concatenate_texts:
return compute_measures(references , predictions )["wer"]
else:
incorrect = 0
total = 0
for prediction, reference in zip(predictions , references ):
measures = compute_measures(reference , prediction )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
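# Worked example for the docstring above (hand-checked against the formula):
# ref1 "this is the reference" vs pred1 "this is the prediction" -> S=1, D=0, I=0, hits=3
# ref2 "there is another one" vs pred2 "there is an other sample" -> S=2, D=0, I=1, hits=2
# WER = (S + D + I) / (S + D + hits) = (3 + 0 + 1) / (3 + 0 + 5) = 4 / 8 = 0.5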
| 287
|
from ...processing_utils import ProcessorMixin
class a_ ( ProcessorMixin ):
feature_extractor_class = 'SpeechT5FeatureExtractor'
tokenizer_class = 'SpeechT5Tokenizer'
def __init__( self , feature_extractor , tokenizer ):
super().__init__(feature_extractor , tokenizer )
def __call__( self , *args , **kwargs ):
audio = kwargs.pop("""audio""" , None )
text = kwargs.pop("""text""" , None )
text_target = kwargs.pop("""text_target""" , None )
audio_target = kwargs.pop("""audio_target""" , None )
sampling_rate = kwargs.pop("""sampling_rate""" , None )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
elif text is not None:
inputs = self.tokenizer(text , **kwargs )
else:
inputs = None
if audio_target is not None:
targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs )
labels = targets["""input_values"""]
elif text_target is not None:
targets = self.tokenizer(text_target , **kwargs )
labels = targets["""input_ids"""]
else:
targets = None
if inputs is None:
return targets
if targets is not None:
inputs["""labels"""] = labels
decoder_attention_mask = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
inputs["""decoder_attention_mask"""] = decoder_attention_mask
return inputs
def pad( self , *args , **kwargs ):
input_values = kwargs.pop("""input_values""" , None )
input_ids = kwargs.pop("""input_ids""" , None )
labels = kwargs.pop("""labels""" , None )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
if input_values is not None:
inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
elif input_ids is not None:
inputs = self.tokenizer.pad(input_ids , **kwargs )
else:
inputs = None
if labels is not None:
if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
targets = self.tokenizer.pad(labels , **kwargs )
labels = targets["""input_ids"""]
else:
feature_size_hack = self.feature_extractor.feature_size
self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
targets = self.feature_extractor.pad(labels , *args , **kwargs )
self.feature_extractor.feature_size = feature_size_hack
labels = targets["""input_values"""]
else:
targets = None
if inputs is None:
return targets
if targets is not None:
inputs["""labels"""] = labels
decoder_attention_mask = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
inputs["""decoder_attention_mask"""] = decoder_attention_mask
return inputs
def batch_decode( self , *args , **kwargs ):
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
return self.tokenizer.decode(*args , **kwargs )
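# Usage sketch (checkpoint name is an assumption): the processor routes text to
# the tokenizer and audio to the feature extractor, so a TTS input would be
# prepared roughly as:
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# inputs = processor(text="Hello world", return_tensors="pt")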
| 287
| 1
|
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
SCREAMING_SNAKE_CASE_ : Optional[int] = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
def __init__( self , group : int = 14 ) -> None:
if group not in primes:
raise ValueError("Unsupported Group" )
self.prime = primes[group]["""prime"""]
self.generator = primes[group]["""generator"""]
self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
def get_private_key( self ) -> str:
return hex(self.__private_key )[2:]
def generate_public_key( self ) -> str:
public_key = pow(self.generator , self.__private_key , self.prime )
return hex(public_key )[2:]
def is_valid_public_key( self , key : int ) -> bool:
return (
2 <= key <= self.prime - 2
and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
)
def generate_shared_key( self , other_key_str : str ) -> str:
other_key = int(other_key_str , base=16 )
if not self.is_valid_public_key(other_key ):
raise ValueError("Invalid public key" )
shared_key = pow(other_key , self.__private_key , self.prime )
return sha256(str(shared_key ).encode() ).hexdigest()
@staticmethod
def is_valid_public_key_static( remote_public_key_str : int , prime : int ) -> bool:
return (
2 <= remote_public_key_str <= prime - 2
and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
)
@staticmethod
def generate_shared_key_static( local_private_key_str : str , remote_public_key_str : str , group : int = 14 ) -> str:
local_private_key = int(local_private_key_str , base=16 )
remote_public_key = int(remote_public_key_str , base=16 )
prime = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
raise ValueError("Invalid public key" )
shared_key = pow(remote_public_key , local_private_key , prime )
return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
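# Minimal end-to-end sketch of the exchange with the class above (group choice
# and variable names are illustrative):
# alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
# alice_public, bob_public = alice.generate_public_key(), bob.generate_public_key()
# Both sides derive the same SHA-256 digest of the shared secret:
# assert alice.generate_shared_key(bob_public) == bob.generate_shared_key(alice_public)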
| 713
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE_ : Any = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class _A ( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = NllbTokenizer
prefix_tokens = []
suffix_tokens = []
def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ) -> Any:
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
self.legacy_behaviour = legacy_behaviour
super().__init__(
vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
_additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
self.lang_code_to_id = {
lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
self._src_lang = src_lang if src_lang is not None else "eng_Latn"
self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def src_lang( self ) -> str:
return self._src_lang
@src_lang.setter
def src_lang( self , new_src_lang : str ) -> None:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def _build_translation_inputs( self , raw_inputs , return_tensors : str , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs ) -> Any:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
self.src_lang = src_lang
inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "eng_Latn" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "fra_Latn" , **SCREAMING_SNAKE_CASE__ , ) -> BatchEncoding:
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def _lowerCamelCase ( self ) -> Tuple:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def set_src_lang_special_tokens( self , src_lang ) -> None:
self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
if self.legacy_behaviour:
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
else:
self.prefix_tokens = [self.cur_lang_code]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def set_tgt_lang_special_tokens( self , lang : str ) -> None:
self.cur_lang_code = self.convert_tokens_to_ids(lang )
if self.legacy_behaviour:
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
else:
self.prefix_tokens = [self.cur_lang_code]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(save_directory ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
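# Behavior sketch: with legacy_behaviour=True the language code is appended
# after the text (prefix=[], suffix=[eos, lang_code]); in the default mode it
# is prefixed (prefix=[lang_code], suffix=[eos]), so encoding "hello" with
# src_lang="eng_Latn" yields [eng_Latn, ...tokens..., </s>].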
| 274
| 0
|
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ) ->Any:
"""simple docstring"""
config = TapasConfig.from_json_file(tapas_config_file )
# set absolute/relative position embeddings parameter
config.reset_position_index_per_cell = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
model = TapasForQuestionAnswering(config=config )
elif task == "WTQ":
# run_task_main.py hparams
config.num_aggregation_labels = 4
config.use_answer_as_supervision = True
# hparam_utils.py hparams
config.answer_loss_cutoff = 0.664694
config.cell_selection_preference = 0.207951
config.huber_loss_delta = 0.121194
config.init_cell_selection_weights_to_zero = True
config.select_one_column = True
config.allow_empty_column_selection = False
config.temperature = 0.0352513
model = TapasForQuestionAnswering(config=config )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
config.num_aggregation_labels = 4
config.use_answer_as_supervision = False
# hparam_utils.py hparams
config.answer_loss_cutoff = 36.4519
config.cell_selection_preference = 0.903421
config.huber_loss_delta = 222.088
config.init_cell_selection_weights_to_zero = True
config.select_one_column = True
config.allow_empty_column_selection = True
config.temperature = 0.763141
model = TapasForQuestionAnswering(config=config )
elif task == "TABFACT":
model = TapasForSequenceClassification(config=config )
elif task == "MLM":
model = TapasForMaskedLM(config=config )
elif task == "INTERMEDIATE_PRETRAINING":
model = TapasModel(config=config )
else:
raise ValueError(F"Task {task} not supported." )
print(F"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
# Save pytorch-model (weights and configuration)
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(pytorch_dump_path )
# Save tokenizer files
print(F"Save tokenizer files to {pytorch_dump_path}" )
tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(pytorch_dump_path )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
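# Example invocation for a WTQ checkpoint (script name and paths are placeholders):
# python convert_tapas_original_tf_checkpoint_to_pytorch.py --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path ./model.ckpt-0 \
#     --tapas_config_file ./tapas_config.json \
#     --pytorch_dump_path ./tapas_wtq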
| 93
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape( input_array : np.ndarray ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def covariance_within_classes( features : np.ndarray , labels : np.ndarray , classes : int ):
'''simple docstring'''
covariance_sum = np.nan
for i in range(classes ):
data = features[:, labels == i]
data_mean = data.mean(1 )
# Centralize the data of class i
centered_data = data - column_reshape(data_mean )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(centered_data , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
covariance_sum = np.dot(centered_data , centered_data.T )
return covariance_sum / features.shape[1]
def covariance_between_classes( features : np.ndarray , labels : np.ndarray , classes : int ):
'''simple docstring'''
general_data_mean = features.mean(1 )
covariance_sum = np.nan
for i in range(classes ):
data = features[:, labels == i]
device_data = data.shape[1]
data_mean = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
covariance_sum = device_data * np.dot(
column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
return covariance_sum / features.shape[1]
def _a ( lowercase__ : np.ndarray , lowercase__ : int ):
'''simple docstring'''
if features.any():
SCREAMING_SNAKE_CASE__ : Any = features.mean(1 )
# Center the dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = features - np.reshape(lowercase__ , (data_mean.size, 1) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(lowercase__ , centered_data.T ) / features.shape[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = np.linalg.eigh(lowercase__ )
# Take all the columns in the reverse order (-1), and then takes only the first
SCREAMING_SNAKE_CASE__ : List[Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.dot(filtered_eigenvectors.T , lowercase__ )
logging.info('Principal Component Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ )
logging.error('Dataset empty' )
raise AssertionError
def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = eigh(
covariance_between_classes(lowercase__ , lowercase__ , lowercase__ ) , covariance_within_classes(lowercase__ , lowercase__ , lowercase__ ) , )
SCREAMING_SNAKE_CASE__ : Tuple = eigenvectors[:, ::-1][:, :dimensions]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = np.linalg.svd(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = svd_matrix[:, 0:dimensions]
SCREAMING_SNAKE_CASE__ : int = np.dot(filtered_svd_matrix.T , lowercase__ )
logging.info('Linear Discriminant Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ )
logging.error('Dataset empty' )
raise AssertionError
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
SCREAMING_SNAKE_CASE__ : Tuple = np.array([0, 0, 0, 1, 1] )
SCREAMING_SNAKE_CASE__ : str = 2
SCREAMING_SNAKE_CASE__ : Dict = 2
    # Assert that the function raises an AssertionError if dimensions >= classes
with pytest.raises(lowercase__ ) as error_info:
SCREAMING_SNAKE_CASE__ : Optional[int] = linear_discriminant_analysis(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if isinstance(lowercase__ , np.ndarray ):
raise AssertionError(
'Did not raise AssertionError for dimensions > classes' )
assert error_info.type is AssertionError
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(lowercase__ ) as error_info:
SCREAMING_SNAKE_CASE__ : int = principal_component_analysis(lowercase__ , lowercase__ )
if not np.allclose(lowercase__ , lowercase__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
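# A quick usage sketch for the PCA helper tested above; the name
# principal_component_analysis is taken from the test's call site, and the
# toy data is illustrative.
features_demo = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.1, 6.0, 8.2]])  # 2 features x 4 samples
projected_demo = principal_component_analysis(features_demo, 1)
print(projected_demo.shape)  # (1, 4): samples projected onto the first principal component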
| 85
| 0
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowercase_ : Union[str, Any] = StableDiffusionDiffEditPipeline
lowercase_ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
lowercase_ : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
lowercase_ : Union[str, Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase_ : List[str] = frozenset([] )
def lowercase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , )
__snake_case = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
__snake_case = DDIMInverseScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__lowerCAmelCase , set_alpha_to_zero=__lowerCAmelCase , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
__snake_case = CLIPTextModel(__lowerCAmelCase )
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__snake_case = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any]=0 ):
__snake_case = floats_tensor((1, 1_6, 1_6) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
__snake_case = floats_tensor((1, 2, 4, 1_6, 1_6) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
if str(__lowerCAmelCase ).startswith('mps' ):
__snake_case = torch.manual_seed(__lowerCAmelCase )
else:
__snake_case = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
__snake_case = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase__ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int]=0 ):
__snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('RGB' )
if str(__lowerCAmelCase ).startswith('mps' ):
__snake_case = torch.manual_seed(__lowerCAmelCase )
else:
__snake_case = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
__snake_case = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str]=0 ):
__snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('RGB' )
if str(__lowerCAmelCase ).startswith('mps' ):
__snake_case = torch.manual_seed(__lowerCAmelCase )
else:
__snake_case = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
__snake_case = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def lowercase__ ( self : Dict ):
if not hasattr(self.pipeline_class , '_optional_components' ):
return
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
__snake_case = self.get_dummy_inputs(__lowerCAmelCase )
__snake_case = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
__snake_case = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCAmelCase , __lowerCAmelCase ) is None , F'`{optional_component}` did not stay set to None after loading.' , )
__snake_case = self.get_dummy_inputs(__lowerCAmelCase )
__snake_case = pipe_loaded(**__lowerCAmelCase )[0]
__snake_case = np.abs(output - output_loaded ).max()
self.assertLess(__lowerCAmelCase , 1E-4 )
def lowercase__ ( self : List[Any] ):
__snake_case = 'cpu'
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
__snake_case = self.get_dummy_mask_inputs(__lowerCAmelCase )
__snake_case = pipe.generate_mask(**__lowerCAmelCase )
__snake_case = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 1_6, 1_6) )
__snake_case = np.array([0] * 9 )
__snake_case = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def lowercase__ ( self : int ):
__snake_case = 'cpu'
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
__snake_case = self.get_dummy_inversion_inputs(__lowerCAmelCase )
__snake_case = pipe.invert(**__lowerCAmelCase ).images
__snake_case = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
__snake_case = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
__snake_case = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
def lowercase__ ( self : Dict ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def lowercase__ ( self : Any ):
__snake_case = 'cpu'
__snake_case = self.get_dummy_components()
__snake_case = {'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
__snake_case = DPMSolverMultistepScheduler(**__lowerCAmelCase )
__snake_case = DPMSolverMultistepInverseScheduler(**__lowerCAmelCase )
__snake_case = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
__snake_case = self.get_dummy_inversion_inputs(__lowerCAmelCase )
__snake_case = pipe.invert(**__lowerCAmelCase ).images
__snake_case = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
__snake_case = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
__snake_case = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class a_ ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def lowercase__ ( cls : Union[str, Any] ):
__snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
__snake_case = raw_image.convert('RGB' ).resize((7_6_8, 7_6_8) )
__snake_case = raw_image
def lowercase__ ( self : List[str] ):
__snake_case = torch.manual_seed(0 )
__snake_case = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
__snake_case = DDIMScheduler.from_config(pipe.scheduler.config )
__snake_case = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
__snake_case = 'a bowl of fruit'
__snake_case = 'a bowl of pears'
__snake_case = pipe.generate_mask(
image=self.raw_image , source_prompt=__lowerCAmelCase , target_prompt=__lowerCAmelCase , generator=__lowerCAmelCase , )
__snake_case = pipe.invert(
prompt=__lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCAmelCase ).latents
__snake_case = pipe(
prompt=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_latents=__lowerCAmelCase , generator=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
__snake_case = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
def lowercase__ ( self : Optional[int] ):
__snake_case = torch.manual_seed(0 )
__snake_case = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__snake_case = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
__snake_case = 'a bowl of fruit'
__snake_case = 'a bowl of pears'
__snake_case = pipe.generate_mask(
image=self.raw_image , source_prompt=__lowerCAmelCase , target_prompt=__lowerCAmelCase , generator=__lowerCAmelCase , )
__snake_case = pipe.invert(
prompt=__lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCAmelCase , num_inference_steps=2_5 , ).latents
__snake_case = pipe(
prompt=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_latents=__lowerCAmelCase , generator=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , inpaint_strength=0.7 , num_inference_steps=2_5 , output_type='numpy' , ).images[0]
__snake_case = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 427
|
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_lowercase = """scheduler_config.json"""
class a_ ( UpperCAmelCase__ ):
lowercase_ : int = 1
lowercase_ : Tuple = 2
lowercase_ : List[Any] = 3
lowercase_ : Optional[int] = 4
lowercase_ : Dict = 5
lowercase_ : str = 6
lowercase_ : Tuple = 7
lowercase_ : Tuple = 8
lowercase_ : List[Any] = 9
lowercase_ : Dict = 10
lowercase_ : Optional[int] = 11
lowercase_ : List[Any] = 12
lowercase_ : Union[str, Any] = 13
lowercase_ : List[str] = 14
@dataclass
class a_ ( UpperCAmelCase__ ):
lowercase_ : torch.FloatTensor
class a_ :
lowercase_ : str = SCHEDULER_CONFIG_NAME
lowercase_ : Union[str, Any] = []
lowercase_ : Dict = True
@classmethod
def lowercase__ ( cls : Optional[Any] , __lowerCAmelCase : Dict[str, Any] = None , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : str=False , **__lowerCAmelCase : Optional[int] , ):
__snake_case , __snake_case , __snake_case = cls.load_config(
pretrained_model_name_or_path=__lowerCAmelCase , subfolder=__lowerCAmelCase , return_unused_kwargs=__lowerCAmelCase , return_commit_hash=__lowerCAmelCase , **__lowerCAmelCase , )
return cls.from_config(__lowerCAmelCase , return_unused_kwargs=__lowerCAmelCase , **__lowerCAmelCase )
def lowercase__ ( self : List[Any] , __lowerCAmelCase : Union[str, os.PathLike] , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Tuple ):
self.save_config(save_directory=__lowerCAmelCase , push_to_hub=__lowerCAmelCase , **__lowerCAmelCase )
@property
def lowercase__ ( self : Union[str, Any] ):
return self._get_compatibles()
@classmethod
def lowercase__ ( cls : Union[str, Any] ):
__snake_case = list(set([cls.__name__] + cls._compatibles ) )
__snake_case = importlib.import_module(__name__.split('.' )[0] )
__snake_case = [
getattr(__lowerCAmelCase , __lowerCAmelCase ) for c in compatible_classes_str if hasattr(__lowerCAmelCase , __lowerCAmelCase )
]
return compatible_classes
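# A hedged usage sketch for the mixin above: load a scheduler config from a
# hub checkpoint and inspect the compatible classes. The repository id is
# illustrative.
from diffusers import DDIMScheduler

scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
print(scheduler.compatibles)  # scheduler classes that share this config format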
| 427
| 1
|
import numpy as np
def lowerCAmelCase_ ( __a ) -> np.array:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_ ( __a ) -> np.array:
"""simple docstring"""
return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
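# A small usage sketch for the two activations above. The name sigmoid comes
# from the second function's own body; sigmoid_linear_unit is an assumed name
# for the second helper (vector * sigmoid(1.702 * vector), a common GELU
# approximation).
x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))              # element-wise logistic function
print(sigmoid_linear_unit(x))  # assumed name; scaled SiLU approximating GELU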
| 59
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
if not is_accelerate_available():
return method
    __lowercase = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase__ ) < version.parse('0.17.0' ):
return method
def wrapper(self , *lowercase__ , **lowercase__ ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase__ , **lowercase__ )
return wrapper
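# A hypothetical sketch of applying the wrapper above to a module method so
# that accelerate's pre-forward hook runs before the call; the class, method,
# and decorator names are all illustrative.
import torch

class TinyEncoder(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(4, 4)

    @apply_forward_hook  # assumed name for the decorator defined above
    def encode(self, x):
        return self.proj(x)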
| 230
| 0
|
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCAmelCase = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)
lowerCAmelCase = None
def __SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Any = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=lowercase_ , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=lowercase_ , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Dict = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__UpperCAmelCase : Optional[Any] = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
    def remove_articles(text ):
        return ARTICLES_REGEX.sub(''' ''' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(lowercase_ ) ) ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
if not s:
return []
return normalize_answer(lowercase_ ).split()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
return int(normalize_answer(lowercase_ ) == normalize_answer(lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = get_tokens(lowercase_ )
__UpperCAmelCase : Optional[Any] = get_tokens(lowercase_ )
__UpperCAmelCase : Union[str, Any] = collections.Counter(lowercase_ ) & collections.Counter(lowercase_ )
__UpperCAmelCase : List[Any] = sum(common.values() )
if len(lowercase_ ) == 0 or len(lowercase_ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__UpperCAmelCase : str = 1.0 * num_same / len(lowercase_ )
__UpperCAmelCase : Optional[int] = 1.0 * num_same / len(lowercase_ )
__UpperCAmelCase : List[str] = (2 * precision * recall) / (precision + recall)
return fa
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = {}
__UpperCAmelCase : List[str] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__UpperCAmelCase : List[Any] = qa['''id''']
__UpperCAmelCase : Optional[int] = [t for t in qa['''answers''']['''text'''] if normalize_answer(lowercase_ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__UpperCAmelCase : Union[str, Any] = ['''''']
if qid not in preds:
print(f"Missing prediction for {qid}" )
continue
__UpperCAmelCase : str = preds[qid]
# Take max over all gold answers
__UpperCAmelCase : Tuple = max(compute_exact(lowercase_ , lowercase_ ) for a in gold_answers )
__UpperCAmelCase : str = max(compute_fa(lowercase_ , lowercase_ ) for a in gold_answers )
return exact_scores, fa_scores
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = {}
for qid, s in scores.items():
__UpperCAmelCase : Tuple = na_probs[qid] > na_prob_thresh
if pred_na:
__UpperCAmelCase : Union[str, Any] = float(not qid_to_has_ans[qid] )
else:
__UpperCAmelCase : List[str] = s
return new_scores
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None ) -> int:
'''simple docstring'''
if not qid_list:
__UpperCAmelCase : int = len(lowercase_ )
return collections.OrderedDict(
[
('''exact''', 1_0_0.0 * sum(exact_scores.values() ) / total),
('''f1''', 1_0_0.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__UpperCAmelCase : int = len(lowercase_ )
return collections.OrderedDict(
[
('''exact''', 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
for k in new_eval:
__UpperCAmelCase : Optional[int] = new_eval[k]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
plt.step(lowercase_ , lowercase_ , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(lowercase_ , lowercase_ , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.0_5] )
plt.ylim([0.0, 1.0_5] )
plt.title(lowercase_ )
plt.savefig(lowercase_ )
plt.clf()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = sorted(lowercase_ , key=lambda lowercase_ : na_probs[k] )
__UpperCAmelCase : Union[str, Any] = 0.0
__UpperCAmelCase : int = 1.0
__UpperCAmelCase : str = 0.0
__UpperCAmelCase : str = [1.0]
__UpperCAmelCase : str = [0.0]
__UpperCAmelCase : Union[str, Any] = 0.0
for i, qid in enumerate(lowercase_ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__UpperCAmelCase : Dict = true_pos / float(i + 1 )
__UpperCAmelCase : List[Any] = true_pos / float(lowercase_ )
if i == len(lowercase_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowercase_ )
recalls.append(lowercase_ )
if out_image:
plot_pr_curve(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
return {"ap": 1_0_0.0 * avg_prec}
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
if out_image_dir and not os.path.exists(lowercase_ ):
os.makedirs(lowercase_ )
__UpperCAmelCase : str = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__UpperCAmelCase : Tuple = make_precision_recall_eval(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , out_image=os.path.join(lowercase_ , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__UpperCAmelCase : Dict = make_precision_recall_eval(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , out_image=os.path.join(lowercase_ , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__UpperCAmelCase : List[str] = {k: float(lowercase_ ) for k, v in qid_to_has_ans.items()}
__UpperCAmelCase : List[str] = make_precision_recall_eval(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , out_image=os.path.join(lowercase_ , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(lowercase_ , lowercase_ , '''pr_exact''' )
merge_eval(lowercase_ , lowercase_ , '''pr_f1''' )
merge_eval(lowercase_ , lowercase_ , '''pr_oracle''' )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
if not qid_list:
return
__UpperCAmelCase : Tuple = [na_probs[k] for k in qid_list]
__UpperCAmelCase : str = np.ones_like(lowercase_ ) / float(len(lowercase_ ) )
plt.hist(lowercase_ , weights=lowercase_ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f"Histogram of no-answer probability: {name}" )
plt.savefig(os.path.join(lowercase_ , f"na_prob_hist_{name}.png" ) )
plt.clf()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Any = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__UpperCAmelCase : Any = num_no_ans
__UpperCAmelCase : Optional[Any] = cur_score
__UpperCAmelCase : List[str] = 0.0
__UpperCAmelCase : Tuple = sorted(lowercase_ , key=lambda lowercase_ : na_probs[k] )
for i, qid in enumerate(lowercase_ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__UpperCAmelCase : Tuple = scores[qid]
else:
if preds[qid]:
__UpperCAmelCase : Optional[Any] = -1
else:
__UpperCAmelCase : str = 0
cur_score += diff
if cur_score > best_score:
__UpperCAmelCase : Optional[int] = cur_score
__UpperCAmelCase : int = na_probs[qid]
return 1_0_0.0 * best_score / len(lowercase_ ), best_thresh
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = find_best_thresh(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = find_best_thresh(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__UpperCAmelCase : int = best_exact
__UpperCAmelCase : List[Any] = exact_thresh
__UpperCAmelCase : Union[str, Any] = best_fa
__UpperCAmelCase : Optional[Any] = fa_thresh
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
with open(OPTS.data_file ) as f:
__UpperCAmelCase : List[Any] = json.load(lowercase_ )
__UpperCAmelCase : List[Any] = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__UpperCAmelCase : str = json.load(lowercase_ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__UpperCAmelCase : Optional[int] = json.load(lowercase_ )
else:
__UpperCAmelCase : List[str] = {k: 0.0 for k in preds}
__UpperCAmelCase : int = make_qid_to_has_ans(lowercase_ ) # maps qid to True/False
__UpperCAmelCase : int = [k for k, v in qid_to_has_ans.items() if v]
__UpperCAmelCase : List[Any] = [k for k, v in qid_to_has_ans.items() if not v]
__UpperCAmelCase , __UpperCAmelCase : Dict = get_raw_scores(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = apply_no_ans_threshold(lowercase_ , lowercase_ , lowercase_ , OPTS.na_prob_thresh )
__UpperCAmelCase : Optional[Any] = apply_no_ans_threshold(lowercase_ , lowercase_ , lowercase_ , OPTS.na_prob_thresh )
__UpperCAmelCase : Any = make_eval_dict(lowercase_ , lowercase_ )
if has_ans_qids:
__UpperCAmelCase : Tuple = make_eval_dict(lowercase_ , lowercase_ , qid_list=lowercase_ )
merge_eval(lowercase_ , lowercase_ , '''HasAns''' )
if no_ans_qids:
__UpperCAmelCase : List[str] = make_eval_dict(lowercase_ , lowercase_ , qid_list=lowercase_ )
merge_eval(lowercase_ , lowercase_ , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , OPTS.out_image_dir )
histogram_na_prob(lowercase_ , lowercase_ , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(lowercase_ , lowercase_ , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
else:
print(json.dumps(lowercase_ , indent=2 ) )
if __name__ == "__main__":
lowerCAmelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
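# A tiny worked example for the token-level F1 defined above; the names are
# taken from the script's own call sites (normalize_answer, compute_fa).
gold_demo = "The quick brown fox"
pred_demo = "quick brown fox jumps"
# gold normalizes to 3 tokens (the article is stripped), pred to 4; 3 tokens
# overlap, so precision = 3/4, recall = 3/3, and F1 = 1.5 / 1.75 ≈ 0.857.
print(compute_fa(gold_demo, pred_demo))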
| 675
|
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
        # First negative value: num is negative and the element before it is non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
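# A worked example on the small fixed grid from the test data above; the
# function name is taken from the benchmark strings.
small_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
print(count_negatives_binary_search(small_grid))  # 8 negative values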
| 675
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
A__ = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
A__ = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
A__ = os.path.join(self.tmpdirname , _snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_snake_case , _snake_case )
def _a ( self : Optional[int] , **_snake_case : str ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Union[str, Any] , **_snake_case : str ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : int , **_snake_case : List[str] ):
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : Dict ):
"""simple docstring"""
A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : int ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = self.get_image_processor()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_snake_case )
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
A__ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _snake_case )
self.assertIsInstance(processor_fast.tokenizer , _snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _snake_case )
self.assertIsInstance(processor_fast.image_processor , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
A__ = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 )
A__ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = self.prepare_image_inputs()
A__ = image_processor(_snake_case , return_tensors='np' )
A__ = processor(images=_snake_case , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'lower newer'
A__ = processor(text=_snake_case )
A__ = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'lower newer'
A__ = self.prepare_image_inputs()
A__ = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(_snake_case )
A__ = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'lower newer'
A__ = self.prepare_image_inputs()
A__ = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 9
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
def __init__( self : Any , __A : Optional[int] , __A : Optional[int]=2 , __A : int=3 , __A : Union[str, Any]=4 , __A : Tuple=2 , __A : Union[str, Any]=7 , __A : Any=True , __A : List[str]=True , __A : Tuple=True , __A : Tuple=True , __A : List[str]=99 , __A : Tuple=36 , __A : Union[str, Any]=3 , __A : str=4 , __A : str=37 , __A : int="gelu" , __A : Union[str, Any]=0.1 , __A : str=0.1 , __A : List[Any]=512 , __A : Optional[int]=16 , __A : int=2 , __A : List[Any]=0.02 , __A : Optional[Any]=6 , __A : int=6 , __A : str=3 , __A : Optional[int]=4 , __A : Union[str, Any]=None , __A : Tuple=1000 , ) ->Any:
"""simple docstring"""
a__ :Any = parent
a__ :Optional[int] = batch_size
a__ :Union[str, Any] = num_channels
a__ :Any = image_size
a__ :Optional[Any] = patch_size
a__ :Optional[Any] = text_seq_length
a__ :int = is_training
a__ :Tuple = use_input_mask
a__ :Any = use_token_type_ids
a__ :int = use_labels
a__ :str = vocab_size
a__ :List[str] = hidden_size
a__ :Optional[int] = num_hidden_layers
a__ :List[str] = num_attention_heads
a__ :List[str] = intermediate_size
a__ :int = hidden_act
a__ :Optional[Any] = hidden_dropout_prob
a__ :Union[str, Any] = attention_probs_dropout_prob
a__ :int = max_position_embeddings
a__ :Tuple = type_vocab_size
a__ :Union[str, Any] = type_sequence_label_size
a__ :List[Any] = initializer_range
a__ :str = coordinate_size
a__ :Union[str, Any] = shape_size
a__ :int = num_labels
a__ :Optional[int] = num_choices
a__ :str = scope
a__ :int = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
a__ :str = text_seq_length
a__ :Tuple = (image_size // patch_size) ** 2 + 1
a__ :Optional[int] = self.text_seq_length + self.image_seq_length
def _snake_case ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
a__ :str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
a__ :Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a__ :Optional[Any] = bbox[i, j, 3]
a__ :List[str] = bbox[i, j, 1]
a__ :str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a__ :Any = bbox[i, j, 2]
a__ :int = bbox[i, j, 0]
a__ :Optional[Any] = t
a__ :int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ :List[Any] = None
if self.use_input_mask:
a__ :str = random_attention_mask([self.batch_size, self.text_seq_length] )
a__ :Optional[Any] = None
if self.use_token_type_ids:
a__ :str = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
a__ :List[str] = None
a__ :List[str] = None
if self.use_labels:
a__ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ :List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
a__ :Tuple = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _snake_case ( self : Tuple , __A : Any , __A : Union[str, Any] , __A : List[str] , __A : Dict , __A : int , __A : Union[str, Any] , __A : Union[str, Any] , __A : Any ) ->Dict:
"""simple docstring"""
a__ :Optional[int] = LayoutLMvaModel(config=__A )
model.to(__A )
model.eval()
# text + image
a__ :List[Any] = model(__A , pixel_values=__A )
a__ :int = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A )
a__ :Union[str, Any] = model(__A , bbox=__A , pixel_values=__A , token_type_ids=__A )
a__ :Optional[Any] = model(__A , bbox=__A , pixel_values=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
a__ :Dict = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
a__ :Dict = model(pixel_values=__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _snake_case ( self : Tuple , __A : List[str] , __A : str , __A : Union[str, Any] , __A : str , __A : Any , __A : List[Any] , __A : str , __A : Tuple ) ->Tuple:
"""simple docstring"""
a__ :Optional[Any] = self.num_labels
a__ :Tuple = LayoutLMvaForSequenceClassification(__A )
model.to(__A )
model.eval()
a__ :str = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Optional[int] , __A : str , __A : Tuple , __A : Union[str, Any] , __A : Union[str, Any] , __A : Dict , __A : int , __A : Optional[int] , __A : int ) ->List[str]:
"""simple docstring"""
a__ :Dict = self.num_labels
a__ :Dict = LayoutLMvaForTokenClassification(config=__A )
model.to(__A )
model.eval()
a__ :Tuple = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _snake_case ( self : str , __A : Optional[Any] , __A : Optional[Any] , __A : List[str] , __A : Union[str, Any] , __A : int , __A : Optional[int] , __A : Union[str, Any] , __A : str ) ->Dict:
"""simple docstring"""
a__ :List[str] = LayoutLMvaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
a__ :List[str] = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self : List[Any] ) ->Dict:
"""simple docstring"""
a__ :str = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
a__ :Tuple = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _a ,_a ,unittest.TestCase):
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _snake_case ( self : List[str] , __A : Union[str, Any] , __A : Optional[Any] , __A : Optional[int] , __A : List[str] , __A : Dict ) ->Dict:
"""simple docstring"""
return True
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
a__ :int = LayoutLMvaModelTester(self )
a__ :Union[str, Any] = ConfigTester(self , config_class=__A , hidden_size=37 )
def _snake_case ( self : int , __A : int , __A : List[Any] , __A : Optional[int]=False ) ->Optional[Any]:
"""simple docstring"""
a__ :Union[str, Any] = copy.deepcopy(__A )
if model_class in get_values(__A ):
a__ :Dict = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__A , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__A ):
a__ :List[str] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in get_values(__A ):
a__ :int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
a__ :Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in [
*get_values(__A ),
]:
a__ :List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in [
*get_values(__A ),
]:
a__ :List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__A , )
return inputs_dict
def _snake_case ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
a__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _snake_case ( self : int ) ->Optional[Any]:
"""simple docstring"""
a__ :str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ :List[Any] = type
self.model_tester.create_and_check_model(*__A )
def _snake_case ( self : Tuple ) ->str:
"""simple docstring"""
a__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def _snake_case ( self : List[Any] ) ->List[str]:
"""simple docstring"""
a__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
def _snake_case ( self : Optional[int] ) ->Dict:
"""simple docstring"""
a__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
@slow
def _snake_case ( self : Union[str, Any] ) ->str:
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ :int = LayoutLMvaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase__ ( ) -> Optional[Any]:
"""simple docstring"""
a__ :List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase):
@cached_property
def _snake_case ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=__A ) if is_vision_available() else None
@slow
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
a__ :Optional[Any] = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(__A )
a__ :str = self.default_image_processor
a__ :List[str] = prepare_img()
a__ :Tuple = image_processor(images=__A , return_tensors="pt" ).pixel_values.to(__A )
a__ :Dict = torch.tensor([[1, 2]] )
a__ :Optional[Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
a__ :int = model(
input_ids=input_ids.to(__A ) , bbox=bbox.to(__A ) , pixel_values=pixel_values.to(__A ) , )
# verify the logits
a__ :int = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , __A )
a__ :Any = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(__A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ) )
| 395
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase__ = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
lowerCamelCase__ = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = PRETRAINED_INIT_CONFIGURATION
__A = ['''input_ids''', '''attention_mask''']
__A = DistilBertTokenizer
def __init__( self : Tuple , lowercase_ : int=None , lowercase_ : Optional[int]=None , lowercase_ : int=True , lowercase_ : Optional[int]="[UNK]" , lowercase_ : str="[SEP]" , lowercase_ : str="[PAD]" , lowercase_ : str="[CLS]" , lowercase_ : Optional[Any]="[MASK]" , lowercase_ : Optional[Any]=True , lowercase_ : int=None , **lowercase_ : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , **lowercase_ , )
_UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , lowercase_) != do_lower_case
or normalizer_state.get("strip_accents" , lowercase_) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowercase_) != tokenize_chinese_chars
):
_UpperCamelCase = getattr(lowercase_ , normalizer_state.pop("type"))
_UpperCamelCase = do_lower_case
_UpperCamelCase = strip_accents
_UpperCamelCase = tokenize_chinese_chars
_UpperCamelCase = normalizer_class(**lowercase_)
_UpperCamelCase = do_lower_case
    def __UpperCAmelCase ( self : int , token_ids_a : Optional[Any] , token_ids_b : Optional[Any]=None) -> Tuple:
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def __UpperCAmelCase ( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
def __UpperCAmelCase ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
_UpperCamelCase = self._tokenizer.model.save(lowercase_ , name=lowercase_)
return tuple(lowercase_)
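# A minimal standalone sketch (not part of the class above) of how the two
# sequence-pair helpers compose; the token ids below are made up for illustration.
if __name__ == "__main__":
    CLS, SEP = 101, 102  # assumed BERT-style special token ids
    token_ids_0 = [7592, 2088]        # hypothetical ids for a first sentence
    token_ids_1 = [2129, 2024, 2017]  # hypothetical ids for a second sentence

    # build_inputs_with_special_tokens produces [CLS] A [SEP] B [SEP]
    input_ids = [CLS] + token_ids_0 + [SEP] + token_ids_1 + [SEP]

    # create_token_type_ids_from_sequences marks the first segment 0, the second 1
    token_type_ids = len([CLS] + token_ids_0 + [SEP]) * [0] + len(token_ids_1 + [SEP]) * [1]

    assert len(input_ids) == len(token_type_ids) == 8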
| 82
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise the PyTorch model from the json config
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
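# Example invocation of this conversion script (the checkpoint paths below are
# placeholders, not bundled files):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-24_H-1024_A-16/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-large-cased-pytorch \
#       --finetuning_task sts-b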
| 82
| 1
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
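# A small worked example of the token-level metrics above: "the cat sat" vs
# "a cat sat down" both normalize to article-free lowercase text, the token
# overlap is {"cat", "sat"}, so precision = 2/2, recall = 2/3 and F1 = 0.8.
if __name__ == "__main__":
    assert normalize_answer("The cat sat!") == "cat sat"
    assert abs(f1_score("the cat sat", "a cat sat down") - 0.8) < 1e-9
    assert calculate_exact_match(["cat sat"], ["The cat sat!"]) == {"em": 1.0}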
| 677
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677
| 1
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 703
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
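# A minimal sketch (independent of the accelerate internals tested above) of the
# batch-size halving strategy that `find_executable_batch_size` implements: retry
# the wrapped function with half the batch size whenever it raises an OOM error.
def naive_find_executable_batch_size(function, starting_batch_size=128):
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as e:
                if "out of memory" in str(e).lower():
                    batch_size //= 2  # halve and retry
                else:
                    raise
        raise RuntimeError("No executable batch size found, reached zero.")

    return wrapper


if __name__ == "__main__":
    attempts = []

    def train(batch_size):
        attempts.append(batch_size)
        if batch_size > 8:
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    assert naive_find_executable_batch_size(train)() == 8
    assert attempts == [128, 64, 32, 16, 8]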
| 133
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        return CLIPVisionModel(config)

    @property
    def dummy_image_processor(self):
        return CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        return PriorTransformer(**model_kwargs)

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        return ShapERenderer(**model_kwargs)

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 40
|
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]  # tables are 1-indexed
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    # One Feistel round; `p4_table` is defined in the __main__ block below.
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
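# A short self-check (illustrative, separate from the interactive block below):
# `apply_table` is a 1-indexed permutation, `xor` a per-character string XOR, and
# `apply_sbox` a 2-bit row/column lookup; decryption works because the second pass
# applies the two Feistel rounds with the sub-keys in reverse order.
def _sdes_smoke_test():
    assert apply_table("ABCD", [2, 4, 3, 1]) == "BDCA"  # 1-indexed permutation
    assert xor("1010", "0110") == "1100"
    # row = 0b(first, last) = 0, col = 0b(middle two) = 3 -> s[0][3] = 2 -> "10"
    assert apply_sbox([[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]], "0110") == "10"


_sdes_smoke_test()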
if __name__ == "__main__":
lowerCAmelCase__ : Optional[Any] =input('Enter 10 bit key: ')
lowerCAmelCase__ : int =input('Enter 8 bit message: ')
lowerCAmelCase__ : Any =[6, 3, 7, 4, 8, 5, 10, 9]
lowerCAmelCase__ : List[str] =[3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowerCAmelCase__ : Dict =[2, 4, 3, 1]
lowerCAmelCase__ : Dict =[2, 6, 3, 1, 4, 8, 5, 7]
lowerCAmelCase__ : Union[str, Any] =[4, 1, 3, 5, 7, 2, 8, 6]
lowerCAmelCase__ : Dict =[4, 1, 2, 3, 2, 3, 4, 1]
lowerCAmelCase__ : str =[[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowerCAmelCase__ : Any =[[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowerCAmelCase__ : Dict =apply_table(key, paa_table)
lowerCAmelCase__ : List[str] =temp[:5]
lowerCAmelCase__ : List[Any] =temp[5:]
lowerCAmelCase__ : Tuple =left_shift(left)
lowerCAmelCase__ : Optional[Any] =left_shift(right)
lowerCAmelCase__ : Optional[int] =apply_table(left + right, pa_table)
lowerCAmelCase__ : Optional[Any] =left_shift(left)
lowerCAmelCase__ : Optional[Any] =left_shift(right)
lowerCAmelCase__ : List[str] =left_shift(left)
lowerCAmelCase__ : Optional[int] =left_shift(right)
lowerCAmelCase__ : Tuple =apply_table(left + right, pa_table)
# encryption
lowerCAmelCase__ : Tuple =apply_table(message, IP)
lowerCAmelCase__ : List[Any] =function(expansion, sa, sa, keya, temp)
lowerCAmelCase__ : str =temp[4:] + temp[:4]
lowerCAmelCase__ : Optional[Any] =function(expansion, sa, sa, keya, temp)
lowerCAmelCase__ : Optional[int] =apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
lowerCAmelCase__ : Union[str, Any] =apply_table(CT, IP)
lowerCAmelCase__ : Tuple =function(expansion, sa, sa, keya, temp)
lowerCAmelCase__ : Union[str, Any] =temp[4:] + temp[:4]
lowerCAmelCase__ : Dict =function(expansion, sa, sa, keya, temp)
lowerCAmelCase__ : int =apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
| 101
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
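# Hedged usage sketch (illustrative only: instantiating the tool downloads the NLLB
# checkpoint, and this module is meant to be imported from the transformers package):
#
#   from transformers.tools import TranslationTool
#   translator = TranslationTool()
#   translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")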
| 658
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
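# Illustrative usage of the helpers above; the yaml/checkpoint paths are the
# defaults assumed by `load_vqgan` and must exist locally for this to run.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device)  # uses ./model_checkpoints/vqgan_only.{yaml,pt}
    x = torch.randn(1, 3, 256, 256, device=device)  # a dummy image batch
    xrec = reconstruct_with_vqgan(x, vqgan)
    print(xrec.shape)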
| 658
| 1
|
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCamelCase_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
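# A small shape sanity-check (illustrative; config values are made up): with a
# tiny CLIPVisionConfig the encoder maps a batch of pixel values to a single
# projected latent token per image.
if __name__ == "__main__":
    from transformers import CLIPVisionConfig

    config = CLIPVisionConfig(
        hidden_size=32,
        intermediate_size=37,
        num_attention_heads=4,
        num_hidden_layers=5,
        image_size=30,
        patch_size=15,
        num_channels=3,
    )
    encoder = PaintByExampleImageEncoder(config, proj_size=16)
    pixel_values = torch.randn(2, 3, 30, 30)
    latents = encoder(pixel_values)
    print(latents.shape)  # expected: (2, 1, 16), one projected token per image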
| 115
|
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 452
| 0
|
import os
import sys
SCREAMING_SNAKE_CASE_ = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
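# The wrappers above follow torch.hub's entry-point convention, so (network
# access permitting) they can be reached through torch.hub, e.g.:
#
#   import torch
#   tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")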
| 720
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Any ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : str ,**lowerCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Tuple ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Optional[int] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[int] ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : str ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : List[str] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : int ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : str ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Any = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Any = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Tuple = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : int ,**lowerCamelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : Optional[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : int ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Tuple = ["sentencepiece"]
def __init__( self : Optional[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : str ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Any ) -> int:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : str ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : int ,**lowerCamelCase__ : Any ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Any = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : str ) -> int:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : Optional[int] ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Optional[Any] ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : List[Any] ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[Any] = ["sentencepiece"]
def __init__( self : List[str] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : Optional[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : str ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Tuple = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Any ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : str ,**lowerCamelCase__ : str ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Dict ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
| 116
| 0
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
__a : List[str] = {"""UserAgent""": UserAgent().random}
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = script.contents[0]
__lowercase = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
__lowercase = F"https://www.instagram.com/{username}/"
__lowercase = self.get_json()
def _SCREAMING_SNAKE_CASE ( self ) -> dict:
'''simple docstring'''
__lowercase = requests.get(self.url , headers=lowerCamelCase__ ).text
__lowercase = BeautifulSoup(lowerCamelCase__ , '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ) -> str:
'''simple docstring'''
return F"{self.__class__.__name__}(\'{self.username}\')"
def __str__( self ) -> str:
'''simple docstring'''
return F"{self.fullname} ({self.username}) is {self.biography}"
@property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self.user_data["username"]
@property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self.user_data["full_name"]
@property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self.user_data["biography"]
@property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self.user_data["business_email"]
@property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self.user_data["external_url"]
@property
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def _SCREAMING_SNAKE_CASE ( self ) -> bool:
'''simple docstring'''
return self.user_data["is_verified"]
@property
def _SCREAMING_SNAKE_CASE ( self ) -> bool:
'''simple docstring'''
return self.user_data["is_private"]
def UpperCAmelCase ( lowercase = "github" ):
"""simple docstring"""
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
__lowercase = InstagramUser(UpperCamelCase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , UpperCamelCase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool the token embeddings, weighting by the attention mask
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
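
# Hedged stand-alone illustration (not part of the model code above): the
# masked mean pooling used in MultilingualCLIP.forward, on toy tensors.
_embs = torch.randn(2, 4, 8)                        # (batch, seq, dim)
_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # 0 marks padding positions
_pooled = (_embs * _mask.unsqueeze(2)).sum(dim=1) / _mask.sum(dim=1)[:, None]
assert _pooled.shape == (2, 8)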
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs, )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Converts artist, genre and lyric tokens to their indices using the vocabulary."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Converts a string into a sequence of tokens (lyrics are character based)."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalizes the input text (used for artist and genre names)."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics) -> str:
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.")
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length.")
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        """Convert the raw string to a list of token ids."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix=None) -> Tuple[str]:
        """Saves the tokenizer's vocabulary dictionaries to the provided directory."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"])
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"])
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"])
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Converts indices back into artist, genre and lyric tokens using the vocab."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
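
# Hedged illustration (not part of the tokenizer): the accent stripping in
# _run_strip_accents above is plain NFD decomposition followed by dropping
# combining marks (Unicode category "Mn").
def _strip_accents_demo(text: str) -> str:
    return "".join(c for c in unicodedata.normalize("NFD", text) if unicodedata.category(c) != "Mn")

assert _strip_accents_demo("Beyoncé") == "Beyonce"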
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
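
# Hedged illustration (toy data, not the real ImageNet label file): the
# id2label / label2id pair built above is just a dict and its inverse.
_id2label_demo = {0: "tench", 1: "goldfish"}
_label2id_demo = {v: k for k, v in _id2label_demo.items()}
assert _label2id_demo["goldfish"] == 1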
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    # Classification head (TF key names assumed from the upstream Keras model)
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
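
# Hedged illustration of the layout conversion in replace_params above: TF conv
# kernels are stored as (H, W, in, out) while PyTorch expects (out, in, H, W).
_tf_kernel_demo = torch.from_numpy(np.zeros((3, 3, 16, 32), dtype=np.float32))
assert _tf_kernel_demo.permute(3, 2, 0, 1).shape == (32, 16, 3, 3)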
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Instantiate the original TF model (constructor defaults assumed from the Keras API)
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, x_end: float, step_size: float) -> np.ndarray:
    """Solve an ODE y' = f(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
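    # Hedged usage sketch (not in the original file): integrate y' = y with
    # y(0) = 1 over [0, 1]; the endpoint approaches e as step_size shrinks.
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
    print(ys[-1])  # ~2.7048 with step_size = 0.01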
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
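
# Hedged example invocation (file name and paths are placeholders, not taken
# from this script):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/preds.txt \
#       --eval_mode e2e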
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, rounded to digit_amount places when digit_amount > 0."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
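
# Hedged illustration (not from the original test): conceptually,
# shift_tokens_right prepends the decoder start token and drops the last
# position, so that labels become valid decoder inputs.
import numpy as np

_labels_demo = np.array([[10, 11, 12]])
_start_id = 0  # assumed decoder_start_token_id; mT5 uses the pad token (0)
_shifted_demo = np.concatenate([np.full((1, 1), _start_id), _labels_demo[:, :-1]], axis=1)
assert (_shifted_demo == np.array([[0, 10, 11]])).all()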
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
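
# Hedged usage sketch (not in the original tests): the tester pattern above
# separates input construction from assertions, so one prepare_config_and_inputs()
# result feeds every create_and_check_* method. Roughly:
#
#   tester = AlbertModelTester(parent=some_unittest_case)
#   config_and_inputs = tester.prepare_config_and_inputs()
#   tester.create_and_check_model(*config_and_inputs)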
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
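
# Hedged sketch of a `parquet_path` fixture (hypothetical; the real fixture
# lives in the repo's conftest): a tiny 4-row table matching the expected
# features asserted above.
@pytest.fixture
def example_parquet_path(tmp_path_factory):
    import pyarrow as pa

    path = str(tmp_path_factory.mktemp("data") / "file.parquet")
    table = pa.table({"col_1": ["0", "1", "2", "3"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]})
    pq.write_table(table, path)
    return path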
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = tmp_path / "cache"
_lowerCAmelCase : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCAmelCase : int = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
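# End-to-end round trip (a sketch using the same APIs as the tests above):
#     writer = ParquetDatasetWriter(ds, tmp_path / "demo.parquet")
#     assert writer.write() > 0
#     reloaded = Dataset.from_parquet(str(tmp_path / "demo.parquet"))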
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
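# Minimal standalone usage (a sketch mirroring the setup above):
#     tool = load_tool("text-question-answering")
#     tool.setup()
#     answer = tool(TEXT, "What did Hugging Face do in April 2021?")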
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # Run the whole denoising loop and return the final sample.
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
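# Typical sampling loop for this scheduler family (a minimal sketch mirroring
# `full_loop` above):
#     scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(10)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample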
def __lowerCamelCase ( _lowercase ) -> str:
return "".join(chr(ord(_lowercase ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
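# Example (illustrative): __lowerCamelCase("hello, world") -> "HELLO, WORLD";
# only 'a'..'z' are shifted by 32 code points, everything else passes through.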
if __name__ == "__main__":
from doctest import testmod
testmod()
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample) -> int:
        """Compute the winning weight vector: the one closer to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the smaller squared Euclidean distance wins
        return 0 if d0 < d1 else 1

    def update(self, weights, sample, j, alpha):
        """Pull the winning weight vector toward the sample: w_j += alpha * (x - w_j)."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
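# Competitive learning in brief: `get_winner` picks the weight vector closer to
# the sample (squared Euclidean distance) and `update` pulls only that vector
# toward the sample by a learning-rate step.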
def main() -> None:
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
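# The tests below exercise each digital_image_processing module against the
# bundled Lena sample images: `img` (BGR) and `gray` (grayscale).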
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith("<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace filter kernel
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
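# Note: the `_LazyModule` indirection defers the heavy torch/speech imports until
# an attribute of this module is first accessed, keeping top-level imports cheap.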
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
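# Each entry maps a model shorthand to its (config class, LM-head model class, tokenizer class) triple.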
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches."
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(f'''Param: {args}''' )
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'''Loading data from {args.data_file}''' )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f'''Loading student config from {args.student_config}''' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
logger.info(f'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio)

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
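        # Note: passing an explicit `noise` tensor pins ViTMAE's otherwise random
        # patch masking, which is what makes the expected logits above reproducible.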
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
# returns 1 for an Euler circuit, 2 for an Euler path, 3 for neither
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
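    # For example, g1 above has exactly two odd-degree vertices (1 and 5), so
    # check_euler(g1, max_node) reports a Euler path; g4 (a triangle) has none,
    # so it reports a Euler cycle.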
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(self, vocab_size=30_522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3_072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13_353_718, searcher_beam_size=5_000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
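# Illustrative usage (a sketch):
#     config = RealmConfig(num_candidates=4)
#     config.num_candidates  # -> 4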
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class SegmentationImageProcessor(BaseImageProcessor):  # class name is a stand-in; the original name is not recoverable from this dump
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
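    # Illustrative usage (added for this document; `processor`, `model` and the target
    # size are assumed placeholders, not defined in this file):
    #
    #     inputs = processor.preprocess(images=[pil_image], return_tensors="pt")
    #     outputs = model(**inputs)
    #     maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])
    #
    # Each returned map is a per-pixel tensor of class indices at the requested size.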
| 690
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n    Output class for the scheduler's step function output.\n\n    Args:\n        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n            denoising loop.\n        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n            `pred_original_sample` can be used to preview progress or for guidance.\n    \"\"\"\n\n    prev_sample: torch.FloatTensor\n    pred_original_sample: Optional[torch.FloatTensor] = None\n"


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 705
|
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
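# Minimal numeric check (added for illustration; not from the original module):
# for a 1000 VA load at power factor 0.8, P = S * pf = 800 W and
# Q = S * sqrt(1 - pf**2) = 600 var, since 0.8**2 + 0.6**2 == 1.
if __name__ == "__main__":
    assert abs(real_power(1000, 0.8) - 800.0) < 1e-9
    assert abs(reactive_power(1000, 0.8) - 600.0) < 1e-9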
| 510
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
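# Illustrative config payload (added for this document): the validation above accepts a
# two-field dict, e.g. LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0});
# "dynamic" is the other accepted type, and factor must be a float strictly greater than 1.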
| 76
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 416
| 0
|
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1_001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
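# Example invocation (added for illustration; the script filename and paths are
# placeholders, not confirmed by this file):
#     python convert_mobilenet_v1.py --model_name mobilenet_v1_1.0_224 \
#         --checkpoint_path ./mobilenet_v1_1.0_224.ckpt --pytorch_dump_folder_path ./out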
| 721
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 397
| 0
|
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random element of the list as the pivot."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element (1-indexed) of a list of distinct ints."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
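# Quick usage check (added for illustration): kth_number selects the k-th smallest
# element (1-indexed) of a list of distinct values in expected linear time.
if __name__ == "__main__":
    assert kth_number([2, 1, 3, 4, 5], 3) == 3
    assert kth_number([10, 2, 5], 1) == 2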
| 107
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text, pattern):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char):
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos):
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self):
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
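# Worked example (added for illustration): for text "ABAABA" and pattern "AB" the
# search above reports matches at positions [0, 3]. On a mismatch, the window jumps to
# (mismatch index in the text) - (last index of the mismatched character in the
# pattern), instead of sliding one position at a time as naive search would.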
| 52
| 0
|
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 433
|
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (num factorial)."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
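# Worked example (added for illustration): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.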
| 433
| 1
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
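# Usage sketch (added for illustration; the feature spec and path are arbitrary examples):
#     features = datasets.Features(
#         {"text": datasets.Value("string"), "label": datasets.Value("int32")}
#     )
#     dataset = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=100)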
| 407
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 407
| 1
|
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (e.g. the last hidden state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 711
|
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected (height, width) after shortest-edge resizing,
        # max-size capping at 1333/800 * size, and rounding down to size_divisor.
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 293
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 46
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
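# Sanity note (added for illustration): generate_key returns (n, e) and (n, d) with
# e * d congruent to 1 mod (p - 1) * (q - 1), so encryption and decryption round-trip.
# With the classic textbook keypair p=61, q=53 (n=3233, e=17, d=2753):
# pow(pow(65, 17, 3233), 2753, 3233) == 65.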
| 46
| 1
|
import sys
import turtle
def get_mid(pa, pb):
    """Return the midpoint of two 2D points."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth):
    """Draw the triangle outline, then recurse on the three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
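# Note (added for illustration): each call draws one outline and then recurses on the
# three corner sub-triangles, so a run at depth d draws 3**0 + 3**1 + ... + 3**d
# outlines in total, e.g. 40 outlines at depth 3.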
| 700
|
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1 (logical AND)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exercise the full truth table of and_gate."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 633
| 0
|
from __future__ import annotations
import requests
valid_terms = set(
    '''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 382
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1_024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 382
| 1
|
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the given string is a valid dotted-quad IPv4 address."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
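# Illustrative calls (added for this document). Note that the octet upper bound used
# above is 254, so addresses containing a 255 octet are rejected by this validator:
#     is_ip_v4_address_valid("192.168.0.23")   # True
#     is_ip_v4_address_valid("192.168.256.1")  # False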
| 578
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
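

# Usage sketch (illustrative; the GPT-2 tokenizer checkpoint is only an example):
#
#     from transformers import AutoTokenizer
#     onnx_config = GPTJOnnxConfig(GPTJConfig(n_layer=2, n_head=4), use_past=True)
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     dummy = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#     )
#     # `dummy` holds input_ids, zeroed past_key_values, and an attention_mask
#     # widened by `seqlen + 2` to match the dummy past length.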
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
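

# Usage sketch (illustrative; in practice these helpers are driven by
# `Accelerator.save_state` / `Accelerator.load_state` rather than called directly):
#
#     plugin = accelerator.state.fsdp_plugin
#     save_fsdp_model(plugin, accelerator, model, "ckpt")
#     save_fsdp_optimizer(plugin, accelerator, optimizer, model, "ckpt")
#     ...
#     load_fsdp_model(plugin, accelerator, model, "ckpt")
#     load_fsdp_optimizer(plugin, accelerator, optimizer, model, "ckpt")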
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Divide and conquer: return (start index, end index, sum) of the maximum subarray."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
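
    # Worked example: for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the maximum subarray
    # is arr[3:7], since 4 + (-1) + 2 + 1 = 6 beats every alternative.
    example = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    low, high, best = max_subarray(example, 0, len(example) - 1)
    print(f"max subarray spans indices {low}..{high} with sum {best}")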
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
        expected_slice_hidden_state = torch.tensor(
            [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
        expected_slice_hidden_state = torch.tensor(
            [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.78_39, -9.00_56, -8.81_21],
            [-7.41_04, -7.03_13, -6.54_01],
            [-6.61_05, -6.34_27, -6.46_75],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.83_24, -8.08_35, -4.19_22],
                [0.84_50, -9.00_50, -3.60_53],
                [0.30_45, -7.72_93, -3.02_75],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
"""simple docstring"""
__lowercase = {"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection):
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
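
    # Worked example (illustrative): for [3, 1, 2], 3 starts a stack, 1 is
    # placed on top of it ([3, 1]), and 2 opens a new stack ([2]); merging the
    # reversed stacks [1, 3] and [2] yields [1, 2, 3].
    assert patience_sort([3, 1, 2]) == [1, 2, 3]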
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A : Tuple = "pt"
elif is_tf_available():
A : Optional[int] = "tf"
else:
A : Optional[Any] = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")

    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
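
    # Worked example for n = 10: (1 + ... + 10) ** 2 = 55 ** 2 = 3025 and
    # 1**2 + ... + 10**2 = 385, so the difference is 3025 - 385 = 2640.
    assert solution(10) == 2640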
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 226
| 0
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowerCamelCase__ ( __lowerCamelCase : int ):
    factors = prime_factors(__lowerCamelCase )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
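# Reference check for the Möbius function above: mu(n) is 0 when n has a squared
# prime factor and (-1)^k for k distinct prime factors. Self-contained sketch
# that avoids the repo-local maths.* imports.
def _mobius_reference(n: int) -> int:
    distinct = []
    d = 2
    while d * d <= n:
        if n % d == 0:
            n //= d
            if n % d == 0:
                return 0  # squared prime factor
            distinct.append(d)
        else:
            d += 1
    if n > 1:
        distinct.append(n)
    return -1 if len(distinct) % 2 else 1

assert [_mobius_reference(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]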
| 63
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "van"
def __init__( self ,_A=224 ,_A=3 ,_A=[7, 3, 3, 3] ,_A=[4, 2, 2, 2] ,_A=[64, 128, 320, 512] ,_A=[3, 3, 12, 3] ,_A=[8, 8, 4, 4] ,_A="gelu" ,_A=0.0_2 ,_A=1E-6 ,_A=1E-2 ,_A=0.0 ,_A=0.0 ,**_A ,):
'''simple docstring'''
super().__init__(**_A )
_lowerCAmelCase : str = image_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : Optional[int] = patch_sizes
_lowerCAmelCase : Any = strides
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : List[str] = depths
_lowerCAmelCase : Dict = mlp_ratios
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : Tuple = layer_scale_init_value
_lowerCAmelCase : Tuple = drop_path_rate
_lowerCAmelCase : str = dropout_rate
| 259
| 0
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params( module ):
    # freeze all parameters of the given module
    for param in module.parameters():
        param.requires_grad = False
def get_device( ):
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = """mps"""
    if device == "mps":
        print(
            """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
            """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
            """ with generations.""" )
    return device
def show_image( image ):
    # display the image and hide both axes
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp( ):
    current_time = datetime.now()
    timestamp = current_time.strftime("""%H:%M:%S""" )
    return timestamp
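# Usage sketch for the helpers above (function names follow the fixed-up
# definitions; the linear layer is illustrative).
layer = torch.nn.Linear(4, 2)
freeze_params(layer)
assert all(not p.requires_grad for p in layer.parameters())
device = get_device()  # "cuda", "mps", or "cpu"
print(f"[{get_timestamp()}] using device: {device}")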
| 59
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase_ = "nat"
lowerCamelCase_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :List[Any] , __A :Optional[Any]=4 , __A :Any=3 , __A :Optional[int]=64 , __A :Optional[int]=[3, 4, 6, 5] , __A :Union[str, Any]=[2, 4, 8, 16] , __A :Optional[Any]=7 , __A :Optional[Any]=3.0 , __A :List[Any]=True , __A :int=0.0 , __A :Dict=0.0 , __A :Optional[Any]=0.1 , __A :str="gelu" , __A :Optional[Any]=0.0_2 , __A :Optional[int]=1E-5 , __A :Optional[int]=0.0 , __A :Optional[Any]=None , __A :Union[str, Any]=None , **__A :Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = embed_dim
SCREAMING_SNAKE_CASE__ = depths
SCREAMING_SNAKE_CASE__ = len(__A )
SCREAMING_SNAKE_CASE__ = num_heads
SCREAMING_SNAKE_CASE__ = kernel_size
SCREAMING_SNAKE_CASE__ = mlp_ratio
SCREAMING_SNAKE_CASE__ = qkv_bias
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = drop_path_rate
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE__ = int(embed_dim * 2 ** (len(__A ) - 1) )
SCREAMING_SNAKE_CASE__ = layer_scale_init_value
SCREAMING_SNAKE_CASE__ = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__A ) + 1 )]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_aligned_output_features_output_indices(
out_features=__A , out_indices=__A , stage_names=self.stage_names )
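# Instantiation sketch for the config above, assuming a transformers version
# that ships NAT support. All values shown are the defaults from __init__.
from transformers import NatConfig

nat_config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
print(nat_config.hidden_size)  # 64 * 2 ** 3 == 512, the channel dim after the last stage
print(nat_config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']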
| 59
| 1
|
"""simple docstring"""
from math import sqrt
def sum_of_divisors( n: int ):
    """simple docstring"""
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( n: int = 1_0000 ):
    """simple docstring"""
    total = sum(
        i
        for i in range(1 , n )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
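# Sanity check: 220 and 284 are the classic amicable pair, so each maps to the
# other under sum_of_divisors and both are counted by solution().
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
assert solution(300) == 220 + 284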
| 580
|
"""simple docstring"""
from __future__ import annotations
seive = [True] * 1_0_0_0_0_0_1
i = 2
while i * i <= 1_0_0_0_0_0_0:
    if seive[i]:
        for j in range(i * i, 1_0_0_0_0_0_1, i):
            seive[j] = False
    i += 1
def is_prime( n: int ):
    """simple docstring"""
    return seive[n]
def contains_an_even_digit( n: int ):
    """simple docstring"""
    return any(digit in '02468' for digit in str(n ) )
def find_circular_primes( limit: int = 100_0000 ):
    """simple docstring"""
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            str_num = str(num )
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
    return result
def solution( ):
    """simple docstring"""
    return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
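# Rotation check: 197 is circular because 197, 971 and 719 are all prime.
rotations = [int("197"[j:] + "197"[:j]) for j in range(3)]
assert rotations == [197, 971, 719]
assert all(is_prime(r) for r in rotations)
assert 197 in find_circular_primes(1000)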
| 580
| 1
|
'''simple docstring'''
def heaps( arr : list ) -> list:
    """simple docstring"""
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate( n : int , arr : list ):
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
UpperCamelCase =input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase =[int(item) for item in user_input.split(",")]
print(heaps(arr))
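# Quick check: Heap's algorithm yields every permutation exactly once, each
# differing from the previous one by a single swap.
from itertools import permutations

perms = heaps([1, 2, 3])
assert len(perms) == 6
assert sorted(perms) == sorted(permutations([1, 2, 3]))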
| 712
|
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
UpperCamelCase =["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems( config , items ):
    """simple docstring"""
    for item in items:
        if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure( config ):
    """simple docstring"""
    config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=True )
def set_test_cache_config( tmp_path_factory , monkeypatch ):
    """simple docstring"""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / """cache"""
    test_hf_datasets_cache = test_hf_cache_home / """datasets"""
    test_hf_metrics_cache = test_hf_cache_home / """metrics"""
    test_hf_modules_cache = test_hf_cache_home / """modules"""
    monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(test_hf_datasets_cache ) )
    monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(test_hf_metrics_cache ) )
    monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / """downloads"""
    monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / """downloads""" / """extracted"""
    monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope="""session""" )
def disable_tqdm_output( ):
    """simple docstring"""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false( monkeypatch ):
    """simple docstring"""
    monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning( monkeypatch ):
    """simple docstring"""
    monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , True )
| 543
| 0
|
import os
import time
import numpy as np
import onnxruntime as ort
lowerCamelCase__ : int = '1'
lowerCamelCase__ : Optional[int] = '0'
lowerCamelCase__ : Optional[Any] = '1'
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
start_time = time.time()
max_iters = 2_000
results = {}
for iter in range(max_iters):
lowerCamelCase__ : str = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1_000 / max_iters))
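# A slightly more careful timing sketch: time.perf_counter() has finer
# resolution than time.time(), and a percentile is more robust than the mean.
latencies = []
for _ in range(100):
    tic = time.perf_counter()
    sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
    latencies.append((time.perf_counter() - tic) * 1_000)
print('p50 latency = {:.3f} ms'.format(sorted(latencies)[len(latencies) // 2]))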
| 31
|
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( __lowerCAmelCase ):
lowerCAmelCase__ : Optional[int] = (UnCLIPScheduler,)
def __a ( self : Any , **lowerCamelCase : int ):
'''simple docstring'''
        config = {
"num_train_timesteps": 1_0_0_0,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**lowerCamelCase )
return config
def __a ( self : Dict ):
'''simple docstring'''
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def __a ( self : List[str] ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowerCamelCase )
def __a ( self : Any ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase )
def __a ( self : List[str] ):
'''simple docstring'''
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=lowerCamelCase )
def __a ( self : Optional[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def __a ( self : str ):
'''simple docstring'''
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowerCamelCase , prev_timestep=lowerCamelCase )
def __a ( self : int ):
'''simple docstring'''
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config(variance_type="fixed_small_log" )
a__ = scheduler_class(**lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0549625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9994987 ) ) < 1e-5
def __a ( self : List[Any] ):
'''simple docstring'''
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config(variance_type="learned_range" )
a__ = scheduler_class(**lowerCamelCase )
a__ = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowerCamelCase ) - -10.1712790 < 1e-5
assert scheduler._get_variance(4_8_7 , predicted_variance=lowerCamelCase ) - -5.7998052 < 1e-5
assert scheduler._get_variance(9_9_9 , predicted_variance=lowerCamelCase ) - -0.0010011 < 1e-5
def __a ( self : List[Any] ):
'''simple docstring'''
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config()
a__ = scheduler_class(**lowerCamelCase )
a__ = scheduler.timesteps
a__ = self.dummy_model()
a__ = self.dummy_sample_deter
a__ = torch.manual_seed(0 )
for i, t in enumerate(lowerCamelCase ):
# 1. predict noise residual
a__ = model(lowerCamelCase , lowerCamelCase )
# 2. predict previous mean of sample x_t-1
a__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ).prev_sample
a__ = pred_prev_sample
a__ = torch.sum(torch.abs(lowerCamelCase ) )
a__ = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 252.2682495 ) < 1e-2
assert abs(result_mean.item() - 0.3284743 ) < 1e-3
def __a ( self : Union[str, Any] ):
'''simple docstring'''
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config()
a__ = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(2_5 )
a__ = scheduler.timesteps
a__ = self.dummy_model()
a__ = self.dummy_sample_deter
a__ = torch.manual_seed(0 )
for i, t in enumerate(lowerCamelCase ):
# 1. predict noise residual
a__ = model(lowerCamelCase , lowerCamelCase )
if i + 1 == timesteps.shape[0]:
a__ = None
else:
a__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
a__ = scheduler.step(
lowerCamelCase , lowerCamelCase , lowerCamelCase , prev_timestep=lowerCamelCase , generator=lowerCamelCase ).prev_sample
a__ = pred_prev_sample
a__ = torch.sum(torch.abs(lowerCamelCase ) )
a__ = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 258.2044983 ) < 1e-2
assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def __a ( self : List[Any] ):
'''simple docstring'''
pass
def __a ( self : Dict ):
'''simple docstring'''
pass
| 489
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
a_ :Dict = None
a_ :List[str] = logging.get_logger(__name__)
a_ :Tuple = '▁'
a_ :int = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a_ :int = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
a_ :Optional[int] = {
'google/pegasus-xsum': 5_12,
}
class lowercase ( _UpperCAmelCase ):
'''simple docstring'''
lowerCamelCase : List[str] = VOCAB_FILES_NAMES
lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = PegasusTokenizer
lowerCamelCase : str = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Optional[Any]=None , _lowercase : List[Any]="<pad>" , _lowercase : Union[str, Any]="</s>" , _lowercase : Any="<unk>" , _lowercase : str="<mask_2>" , _lowercase : Optional[Any]="<mask_1>" , _lowercase : Optional[int]=None , _lowercase : List[str]=1_03 , **_lowercase : Dict , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = offset
if additional_special_tokens is not None:
if not isinstance(_lowercase , _lowercase ):
raise TypeError(
f"""additional_special_tokens should be of type {type(_lowercase )}, but is"""
f""" {type(_lowercase )}""" )
SCREAMING_SNAKE_CASE__ : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(_lowercase ) , self.offset - 1 )
]
if len(set(_lowercase ) ) != len(_lowercase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
SCREAMING_SNAKE_CASE__ : List[str] = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
_lowercase , tokenizer_file=_lowercase , pad_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , mask_token=_lowercase , mask_token_sent=_lowercase , offset=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_file
SCREAMING_SNAKE_CASE__ : str = False if not self.vocab_file else True
def lowercase__ ( self : int , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : int = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self : str , _lowercase : List , _lowercase : Optional[List] = None , _lowercase : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(_lowercase )
elif token_ids_a is None:
return self._special_token_mask(_lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : int , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
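# Typical use of the fast tokenizer defined above (requires network access to
# the Hub; the checkpoint name comes from the pretrained map earlier in the file).
from transformers import PegasusTokenizerFast

pegasus_tok = PegasusTokenizerFast.from_pretrained('google/pegasus-xsum')
enc = pegasus_tok(['PEGASUS is a summarization model.'], return_tensors='pt')
print(enc['input_ids'].shape)  # sequence ends with the </s> eos token appended above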
| 717
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a_ :Tuple = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE__ : str = ''''''
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : str = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def a ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE__ : List[Any] = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=A__ , )
SCREAMING_SNAKE_CASE__ : Dict = ViTHybridConfig(backbone_config=A__ , image_size=3_8_4 , num_labels=1_0_0_0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
# load original model from timm
SCREAMING_SNAKE_CASE__ : int = timm.create_model(A__ , pretrained=A__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(A__ )
SCREAMING_SNAKE_CASE__ : Dict = create_rename_keys(A__ , A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
read_in_q_k_v(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE__ : Tuple = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE__ : Tuple = json.load(open(hf_hub_download(A__ , A__ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE__ : int = {int(A__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Optional[int] = idalabel
SCREAMING_SNAKE_CASE__ : int = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ViTHybridModel(A__ ).eval()
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ViTHybridForImageClassification(A__ ).eval()
model.load_state_dict(A__ )
# create image processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = create_transform(**resolve_data_config({} , model=A__ ) )
SCREAMING_SNAKE_CASE__ : Tuple = transform.transforms
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE__ : List[Any] = ViTHybridImageProcessor(
do_resize=A__ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=A__ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=A__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE__ : Dict = prepare_img()
SCREAMING_SNAKE_CASE__ : Any = transform(A__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Dict = processor(A__ , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(A__ , A__ )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(A__ )
SCREAMING_SNAKE_CASE__ : Dict = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
SCREAMING_SNAKE_CASE__ : str = timm_model.forward_features(A__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(A__ , outputs.pooler_output , atol=1e-3 )
else:
SCREAMING_SNAKE_CASE__ : Dict = timm_model(A__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A__ , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(A__ ).mkdir(exist_ok=A__ )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(A__ )
if push_to_hub:
print(f"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(f"""ybelkada/{vit_name}""" )
processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
a_ :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
a_ :Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
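# Programmatic equivalent of the argparse front-end above (a sketch; it
# downloads the timm weights, so it is guarded rather than run on import).
if False:  # flip to run the conversion without the CLI
    convert_vit_checkpoint(
        vit_name="vit_base_r50_s16_384",
        pytorch_dump_folder_path="./vit-hybrid-base",  # illustrative path
        push_to_hub=False,
    )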
| 250
| 0
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__snake_case :str =False
class lowerCAmelCase__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
A = torch.manual_seed(0 )
A = pipe.dual_guided(
prompt='first prompt' , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCamelCase )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = generator.manual_seed(0 )
A = pipe.dual_guided(
prompt='first prompt' , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __UpperCamelCase ( self : Tuple ) -> List[str]:
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = 'cyberpunk 2077'
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
A = torch.manual_seed(0 )
A = pipe.dual_guided(
prompt=__UpperCamelCase , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
A = 'A painting of a squirrel eating a burger '
A = torch.manual_seed(0 )
A = pipe.text_to_image(
prompt=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
A = pipe.image_variation(__UpperCamelCase , generator=__UpperCamelCase , output_type='numpy' ).images
A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 106
|
"""simple docstring"""
def search( list_data : list , key : int , left : int = 0 , right : int = 0 ) -> int:
    '''simple docstring'''
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
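# Usage sketch for the two-ended linear search above: it closes in from both
# ends, so it takes at most len(list_data) / 2 recursive calls.
data = [4, 8, 15, 16, 23, 42]
assert search(data, 15) == 2
assert search(data, 42) == 5
assert search(data, 7) == -1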
| 265
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Tuple = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class UpperCamelCase ( UpperCamelCase__ ):
__UpperCamelCase ="xmod"
def __init__( self : Optional[Any] , snake_case__ : Optional[int]=3_0_5_2_2 , snake_case__ : int=7_6_8 , snake_case__ : str=1_2 , snake_case__ : Dict=1_2 , snake_case__ : List[str]=3_0_7_2 , snake_case__ : List[str]="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Any=5_1_2 , snake_case__ : int=2 , snake_case__ : str=0.02 , snake_case__ : Optional[int]=1E-12 , snake_case__ : int=1 , snake_case__ : Any=0 , snake_case__ : List[Any]=2 , snake_case__ : Union[str, Any]="absolute" , snake_case__ : List[Any]=True , snake_case__ : int=None , snake_case__ : Optional[Any]=False , snake_case__ : List[str]=2 , snake_case__ : Tuple=False , snake_case__ : int=True , snake_case__ : Optional[Any]=True , snake_case__ : int=("en_XX",) , snake_case__ : Dict=None , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = pre_norm
SCREAMING_SNAKE_CASE = adapter_reduction_factor
SCREAMING_SNAKE_CASE = adapter_layer_norm
SCREAMING_SNAKE_CASE = adapter_reuse_layer_norm
SCREAMING_SNAKE_CASE = ln_before_adapter
SCREAMING_SNAKE_CASE = list(_a )
SCREAMING_SNAKE_CASE = default_language
class UpperCamelCase ( UpperCamelCase__ ):
@property
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 710
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
| 673
| 0
|
'''simple docstring'''
def combination_sum_iv( n , array , target ):
    def count_of_possible_combinations( target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n , array , target ):
    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n , array , target ):
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 263
|
'''simple docstring'''
from __future__ import annotations
def prime_sieve( limit ):
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( ceiling = 1_00_00_00 ):
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 263
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main( ):
    parser = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""" )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 213
|
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = ["image_processor"]
UpperCAmelCase__ = "SamImageProcessor"
def __init__( self : str , __snake_case : Union[str, Any] ) -> str:
super().__init__(__snake_case )
__magic_name__: List[Any] = self.image_processor
__magic_name__: Optional[int] = -1_0
__magic_name__: Dict = self.image_processor.size["""longest_edge"""]
def __call__( self : List[Any] , __snake_case : Tuple=None , __snake_case : Any=None , __snake_case : Optional[Any]=None , __snake_case : str=None , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Dict , ) -> BatchEncoding:
__magic_name__: Optional[int] = self.image_processor(
__snake_case , return_tensors=__snake_case , **__snake_case , )
# pop arguments that are not used in the foward but used nevertheless
__magic_name__: int = encoding_image_processor["""original_sizes"""]
if hasattr(__snake_case , """numpy""" ): # Checks if Torch or TF tensor
__magic_name__: Optional[Any] = original_sizes.numpy()
__magic_name__, __magic_name__, __magic_name__: Any = self._check_and_preprocess_points(
input_points=__snake_case , input_labels=__snake_case , input_boxes=__snake_case , )
__magic_name__: Optional[int] = self._normalize_and_convert(
__snake_case , __snake_case , input_points=__snake_case , input_labels=__snake_case , input_boxes=__snake_case , return_tensors=__snake_case , )
return encoding_image_processor
def lowerCamelCase__ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Any=None , __snake_case : Dict=None , __snake_case : Union[str, Any]=None , __snake_case : Union[str, Any]="pt" , ) -> Any:
if input_points is not None:
if len(__snake_case ) != len(__snake_case ):
__magic_name__: str = [
self._normalize_coordinates(self.target_size , __snake_case , original_sizes[0] ) for point in input_points
]
else:
__magic_name__: List[str] = [
self._normalize_coordinates(self.target_size , __snake_case , __snake_case )
for point, original_size in zip(__snake_case , __snake_case )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__magic_name__, __magic_name__: Tuple = self._pad_points_and_labels(__snake_case , __snake_case )
__magic_name__: Tuple = np.array(__snake_case )
if input_labels is not None:
__magic_name__: List[Any] = np.array(__snake_case )
if input_boxes is not None:
if len(__snake_case ) != len(__snake_case ):
__magic_name__: List[str] = [
self._normalize_coordinates(self.target_size , __snake_case , original_sizes[0] , is_bounding_box=__snake_case )
for box in input_boxes
]
else:
__magic_name__: List[Any] = [
self._normalize_coordinates(self.target_size , __snake_case , __snake_case , is_bounding_box=__snake_case )
for box, original_size in zip(__snake_case , __snake_case )
]
__magic_name__: int = np.array(__snake_case )
if input_boxes is not None:
if return_tensors == "pt":
__magic_name__: Union[str, Any] = torch.from_numpy(__snake_case )
# boxes batch size of 1 by default
__magic_name__: Union[str, Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__magic_name__: List[Any] = tf.convert_to_tensor(__snake_case )
# boxes batch size of 1 by default
__magic_name__: Optional[Any] = tf.expand_dims(__snake_case , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__magic_name__: Union[str, Any] = torch.from_numpy(__snake_case )
# point batch size of 1 by default
__magic_name__: Optional[int] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__magic_name__: Dict = tf.convert_to_tensor(__snake_case )
# point batch size of 1 by default
__magic_name__: Union[str, Any] = tf.expand_dims(__snake_case , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__magic_name__: Union[str, Any] = torch.from_numpy(__snake_case )
# point batch size of 1 by default
__magic_name__: Optional[Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__magic_name__: Union[str, Any] = tf.convert_to_tensor(__snake_case )
# point batch size of 1 by default
__magic_name__: int = tf.expand_dims(__snake_case , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def lowerCamelCase__ ( self : List[str] , __snake_case : Tuple , __snake_case : Dict ) -> Optional[int]:
__magic_name__: Union[str, Any] = max([point.shape[0] for point in input_points] )
__magic_name__: Any = []
for i, point in enumerate(__snake_case ):
if point.shape[0] != expected_nb_points:
__magic_name__: Optional[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
__magic_name__: str = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(__snake_case )
__magic_name__: str = processed_input_points
return input_points, input_labels
def lowerCamelCase__ ( self : Tuple , __snake_case : int , __snake_case : np.ndarray , __snake_case : Tuple , __snake_case : List[str]=False ) -> np.ndarray:
__magic_name__, __magic_name__: Any = original_size
__magic_name__, __magic_name__: Tuple = self.image_processor._get_preprocess_shape(__snake_case , longest_edge=__snake_case )
__magic_name__: List[str] = deepcopy(__snake_case ).astype(__snake_case )
if is_bounding_box:
__magic_name__: List[str] = coords.reshape(-1 , 2 , 2 )
__magic_name__: str = coords[..., 0] * (new_w / old_w)
__magic_name__: int = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__magic_name__: str = coords.reshape(-1 , 4 )
return coords
def lowerCamelCase__ ( self : int , __snake_case : Optional[Any]=None , __snake_case : Optional[int]=None , __snake_case : int=None , ) -> Dict:
if input_points is not None:
if hasattr(__snake_case , """numpy""" ): # Checks for TF or Torch tensor
__magic_name__: Union[str, Any] = input_points.numpy().tolist()
if not isinstance(__snake_case , __snake_case ) or not isinstance(input_points[0] , __snake_case ):
raise ValueError("""Input points must be a list of list of floating points.""" )
__magic_name__: Dict = [np.array(__snake_case ) for input_point in input_points]
else:
__magic_name__: str = None
if input_labels is not None:
if hasattr(__snake_case , """numpy""" ):
__magic_name__: Optional[int] = input_labels.numpy().tolist()
if not isinstance(__snake_case , __snake_case ) or not isinstance(input_labels[0] , __snake_case ):
raise ValueError("""Input labels must be a list of list integers.""" )
__magic_name__: Tuple = [np.array(__snake_case ) for label in input_labels]
else:
__magic_name__: str = None
if input_boxes is not None:
if hasattr(__snake_case , """numpy""" ):
__magic_name__: Tuple = input_boxes.numpy().tolist()
if (
not isinstance(__snake_case , __snake_case )
or not isinstance(input_boxes[0] , __snake_case )
or not isinstance(input_boxes[0][0] , __snake_case )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
                __magic_name__: List[Any] = [np.array(box ).astype(np.float32 ) for box in input_boxes]
else:
__magic_name__: List[str] = None
return input_points, input_labels, input_boxes
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
__magic_name__: int = self.image_processor.model_input_names
return list(dict.fromkeys(__snake_case ) )
def lowerCamelCase__ ( self : Any , *__snake_case : str , **__snake_case : Union[str, Any] ) -> Optional[Any]:
return self.image_processor.post_process_masks(*__snake_case , **__snake_case )
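# End-to-end usage sketch for the processor above (requires network access;
# the checkpoint name and the point coordinates are illustrative).
from PIL import Image
import numpy as np
from transformers import SamProcessor

sam_processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
dummy_image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
sam_inputs = sam_processor(dummy_image, input_points=[[[320, 240]]], return_tensors="pt")
print(sam_inputs["pixel_values"].shape, sam_inputs["input_points"].shape)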
| 213
| 1
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text ):
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(""" """ + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F"{round(-1 * my_fir_sum ):.1f}" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F"{round(-1 * my_sec_sum ):.1f}" )
    # print the difference between them
    print(F"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def analyze_text( text ):
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main( ):
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
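# Usage sketch for the helpers above: character counts and first/second order
# entropy of a short string (printed values depend on the text).
single, double = analyze_text("hello world")
print(single.most_common(3))
calculate_prob("hello world hello world")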
| 21
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance( x : float , y : float , max_step : int ) -> float:
    """simple docstring"""
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb( distance : float ) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_5_5, 2_5_5, 2_5_5)
def get_color_coded_rgb( distance : float ) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width : int = 8_0_0 , image_height : int = 6_0_0 , figure_center_x : float = -0.6 , figure_center_y : float = 0 , figure_width : float = 3.2 , max_step : int = 5_0 , use_distance_color_coding : bool = True , ) -> Image.Image:
    """simple docstring"""
    img = Image.new('RGB' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
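    # A small ASCII preview (not in the original) of the same escape-time
    # logic, using get_distance directly; the resolution and character ramp
    # are arbitrary choices.
    for row in range(12):
        fig_y = -1.1 + 2.2 * row / 11
        print("".join(
            " .:-=+*#%@"[min(9, int(get_distance(-2.1 + 3.0 * col / 47, fig_y, 50) * 9))]
            for col in range(48)
        ))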
| 292
| 0
|
def twos_complement(number: int ) -> str:
    '''Return the two's complement bit pattern of a negative integer as a binary string.'''
    if number > 0:
        raise ValueError('input must be a negative integer' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '1'
            + '0' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '0'
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
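    # A few spot checks (not in the original); values verified by hand:
    for n in (0, -1, -5, -17):
        print(n, twos_complement(n))
    # expected: 0b0, 0b11, 0b1011, 0b101111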
| 711
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor ):
    def __init__(self , *args , **kwargs ):
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 476
| 0
|
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds , labels ):
    """simple docstring"""
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , 'sklearn' )
    return (preds == labels).mean()
def acc_and_f1(preds , labels ):
    """simple docstring"""
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_f1 , 'sklearn' )
    acc = simple_accuracy(preds , labels )
    f1 = f1_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds , labels ):
    """simple docstring"""
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , 'sklearn' )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name , preds , labels ):
    """simple docstring"""
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , 'sklearn' )
    assert len(preds ) == len(labels ), f'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}'''
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_f1(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_f1(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics(task_name , preds , labels ):
    """simple docstring"""
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , 'sklearn' )
    if len(preds ) != len(labels ):
        raise ValueError(f'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}''' )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
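# Illustrative usage (hedged; not part of the original module):
#
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   print(acc_and_f1(preds, labels))
#   # {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}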
| 1
|
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 131
| 0
|
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("""All input parameters must be positive""" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("""Relative densities cannot be greater than one""" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
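    # Sanity check (not in the original): when the relative densities sum to
    # one the curvature term vanishes, so E(z=0) = 1 and the function returns
    # the Hubble constant unchanged.
    assert abs(hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
                                matter_density=0.3, dark_energy=1 - 0.3 - 1e-4,
                                redshift=0) - 68.3) < 1e-10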
| 720
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase ):
    '''simple docstring'''
    def setUp(self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        """Create a list of PIL images (channel-first arrays moved to channel-last)."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self ):
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
    def test_image_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 9
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput ):
    """simple docstring"""
    latents: torch.FloatTensor
class VQModel(ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
    @apply_forward_hook
    def encode(self , x: torch.FloatTensor , return_dict: bool = True ):
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
    @apply_forward_hook
    def decode(self , h: torch.FloatTensor , force_not_quantize: bool = False , return_dict: bool = True ):
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == "spatial" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward(self , sample: torch.FloatTensor , return_dict: bool = True ):
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
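# Hedged smoke test (not part of the original module): with the default
# config, an encode/decode round trip should reconstruct the input shape.
#
#   model = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   with torch.no_grad():
#       out = model(x).sample
#   assert out.shape == x.shape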
| 221
|
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    """simple docstring"""
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
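# Illustrative invocation (paths are hypothetical placeholders):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin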
| 676
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'num_waveforms_per_prompt',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    def get_dummy_components(self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = ClapTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
        text_encoder = ClapTextModelWithProjection(text_encoder_config )
        tokenizer = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77 )
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=False , )
        vocoder = SpeechT5HifiGan(vocoder_config )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'vocoder': vocoder,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A hammer hitting a wooden surface',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
        }
        return inputs
    def test_audioldm_ddim(self ):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = audioldm_pipe(**inputs )
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2
    def test_audioldm_prompt_embeds(self ):
        """simple docstring"""
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = audioldm_pipe(**inputs )
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop('prompt' )]
        text_inputs = audioldm_pipe.tokenizer(
            prompt , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=True , return_tensors='pt' , )
        text_inputs = text_inputs['input_ids'].to(torch_device )
        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs , )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds , dim=-1 )
        inputs['prompt_embeds'] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs )
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1E-2
    def test_audioldm_negative_prompt_embeds(self ):
        """simple docstring"""
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = audioldm_pipe(**inputs )
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop('prompt' )]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=True , return_tensors='pt' , )
            text_inputs = text_inputs['input_ids'].to(torch_device )
            text_embeds = audioldm_pipe.text_encoder(
                text_inputs , )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds , dim=-1 )
            embeds.append(text_embeds )
        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds
        # forward
        output = audioldm_pipe(**inputs )
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1E-2
    def test_audioldm_negative_prompt(self ):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True )
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = 'egg cracking'
        output = audioldm_pipe(**inputs , negative_prompt=negative_prompt )
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2
    def test_audioldm_num_waveforms_per_prompt(self ):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True )
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        prompt = 'A hammer hitting a wooden surface'
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt , num_inference_steps=2 ).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt , num_inference_steps=2 , num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self ):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device )
        output = audioldm_pipe(audio_length_in_s=0.016 , **inputs )
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio ) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032 , **inputs )
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio ) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self ):
        """simple docstring"""
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        prompt = ['hey']
        output = audioldm_pipe(prompt , num_inference_steps=1 )
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config ).to(torch_device )
        output = audioldm_pipe(prompt , num_inference_steps=1 )
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self ):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False )
    def test_inference_batch_single_identical(self ):
        """simple docstring"""
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False )
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False )
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase ):
    '''simple docstring'''
    def tearDown(self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        """simple docstring"""
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 8, 128, 16) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            'prompt': 'A hammer hitting a wooden surface',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 2.5,
        }
        return inputs
    def test_audioldm(self ):
        """simple docstring"""
        audioldm_pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        inputs['num_inference_steps'] = 25
        audio = audioldm_pipe(**inputs ).audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 81_920
        audio_slice = audio[77_230:77_240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
        max_diff = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 1E-2
    def test_audioldm_lms(self ):
        """simple docstring"""
        audioldm_pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        audio = audioldm_pipe(**inputs ).audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 81_920
        audio_slice = audio[27_780:27_790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
        max_diff = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 3E-2
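# Hedged end-to-end usage of the pipeline exercised above (requires the
# `cvssp/audioldm` checkpoint and a GPU for reasonable speed):
#
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm").to("cuda")
#   audio = pipe("A hammer hitting a wooden surface",
#                num_inference_steps=10, audio_length_in_s=5.12).audios[0]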
| 712
|
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid: np.ndarray , source: tuple[int, int] , destination: tuple[int, int] , allow_diagonal: bool , ):
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object )
    predecessors[source] = None
    while queue:
        (dist , (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
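    # Small usage example (not in the original): a 3x3 grid of 1s is fully
    # traversable, so with diagonal moves the corner-to-corner distance is 2.
    demo_grid = np.ones((3, 3), dtype=int)
    demo_dist, demo_path = dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=True)
    print(demo_dist, demo_path)  # 2.0 [(0, 0), (1, 1), (2, 2)]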
| 540
| 0
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"""&page={i + 2}""" , headers=headers ).json()
            job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        return job_links
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
def get_artifacts_links(workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"""
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"""&page={i + 2}""" , headers=headers ).json()
            artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
        return artifacts
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
def download_artifact(artifact_name , artifact_url , output_dir , token ):
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["""Location"""]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , f"""{artifact_name}.zip""" )
    with open(file_path , """wb""" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact(artifact_zip_path , job_links=None ):
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("""UTF-8""" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(""": """ )]
                                    error = line[line.index(""": """ ) + len(""": """ ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
                                # `test` is the test method that failed
                                test = line[len("""FAILED """ ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            f"""`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` """
            f"""and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
            """ problem.""" )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors(artifact_dir , job_links=None ):
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(""".zip""" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error(logs , error_filter=None ):
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def get_model(test ):
    """Get the model name from a test method path."""
    test = test.split("""::""" )[0]
    if test.startswith("""tests/models/""" ):
        test = test.split("""/""" )[2]
    else:
        test = None
    return test
def reduce_by_model(logs , error_filter=None ):
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"""count""": n_errors, """errors""": error_counts}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def make_github_table(reduced_by_error ):
    header = """| no. | error | status |"""
    sep = """|-:|:-|:-|"""
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["""count"""]
        line = f"""| {count} | {error[:100]} | |"""
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model(reduced_by_model ):
    header = """| model | no. of errors | major error | count |"""
    sep = """|-:|-:|-:|-:|"""
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["""count"""]
        error , _count = list(reduced_by_model[model]["""errors"""].items() )[0]
        line = f"""| {model} | {count} | {error[:60]} | {_count} |"""
        lines.append(line )
    return "\n".join(lines )
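# Illustrative output (hedged) of make_github_table on a toy input:
#
#   make_github_table({"AssertionError": {"count": 3, "failed_tests": []}})
#   | no. | error | status |
#   |-:|:-|:-|
#   | 3 | AssertionError | |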
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 63
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    '''simple docstring'''
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type: str = field(default="Image" , init=False , repr=False )
    def __call__(self ):
        return self.pa_type
    def encode_example(self , value ) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support encoding images, please install 'Pillow'.""" )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
        elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("""path""" )}
        elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
        else:
            raise ValueError(
                f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def decode_example(self , value , token_per_repo_id=None ) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support decoding images, please install 'Pillow'.""" )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["""path"""], value["""bytes"""]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split("""::""" )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["""repo_id"""]
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , """rb""" , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
    def cast_storage(self , storage ) -> pa.StructArray:
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                bytes_array = storage.field("""bytes""" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                path_array = storage.field("""path""" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage(self , storage ) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , """rb""" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image ) -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image ) -> dict:
    if hasattr(image , """filename""" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array ) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("""|u1""" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts( objs ):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
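# Illustrative sketch (added for clarity, not part of the original module): the helpers
# above normalize every supported input type to a {"path", "bytes"} dict. A minimal,
# self-contained demonstration of the same round trip, assuming numpy and Pillow are
# installed; the _demo_* name is hypothetical and exists only for this example.
def _demo_encode_round_trip():
    import io

    import numpy as np
    import PIL.Image

    array = np.zeros((4, 4, 3), dtype="|u1")  # multi-channel arrays must be uint8
    image = PIL.Image.fromarray(array)
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")  # no source file, so fall back to PNG like image_to_bytes
    encoded = {"path": None, "bytes": buffer.getvalue()}
    decoded = PIL.Image.open(io.BytesIO(encoded["bytes"]))
    assert np.array_equal(np.array(decoded), array)
    return encoded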
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
A_ = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
A_ = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ):
    bs = (
        list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class __SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
return len(self.encoder )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def _UpperCamelCase ( self : str , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
    def _UpperCamelCase ( self : int , text ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens
def _UpperCamelCase ( self : Optional[Any] , snake_case : Dict ):
'''simple docstring'''
return self.encoder.get(snake_case , self.encoder.get(self.unk_token ) )
def _UpperCamelCase ( self : Optional[int] , snake_case : int ):
'''simple docstring'''
return self.decoder.get(snake_case )
    def _UpperCamelCase ( self : Union[str, Any] , tokens ):
        '''simple docstring'''
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text
    def _UpperCamelCase ( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def _UpperCamelCase ( self : List[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def _UpperCamelCase ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def _UpperCamelCase ( self : Union[str, Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _UpperCamelCase ( self : str , text , is_split_into_words : bool = False , **kwargs ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
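# Illustrative sketch (added, not part of the original file): the bpe() method above
# repeatedly merges the highest-ranked adjacent symbol pair. A tiny standalone
# re-implementation of that loop on a toy merge table; names here are hypothetical.
def _demo_bpe_merge():
    bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # toy merge table, lowest rank merges first
    word = tuple("low")
    while True:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        candidates = [p for p in pairs if p in bpe_ranks]
        if not candidates:
            break
        first, second = min(candidates, key=lambda pair: bpe_ranks[pair])
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)  # -> "low", after merging l+o and then lo+w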
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch :
    def __init__( self : Any , text : str , pattern : str ):
        '''simple docstring'''
        self.text , self.pattern = text, pattern
        self.textLen , self.patLen = len(text ), len(pattern )
    def match_in_pattern( self : str , char : str ):
        '''simple docstring'''
        # return the rightmost index of char in the pattern, or -1 if absent
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self : Any , current_pos : int ):
        '''simple docstring'''
        # return the rightmost mismatching position for the window at current_pos, or -1 on a full match
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self : Union[str, Any] ):
        '''simple docstring'''
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
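# Illustrative sketch (added): the match_in_pattern scan above is O(m) per lookup; the
# classic bad-character rule precomputes the rightmost index of every character once.
# A hedged, standalone version of that table; the function name is hypothetical.
def _demo_bad_char_table(pattern: str) -> dict:
    # last occurrence of each character in the pattern
    return {char: index for index, char in enumerate(pattern)}

# e.g. _demo_bad_char_table("AB") -> {"A": 0, "B": 1}; a mismatched text character
# absent from the table yields the maximal shift of len(pattern).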
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
class A__ ( SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ['input_features', 'attention_mask']
    def __init__( self : str , feature_size=80 , sampling_rate=16_000 , num_mel_bins=80 , padding_value=0.0 , normalize_means=True , normalize_vars=True , do_ceptral_normalize=True , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features( self : Optional[Any] , waveform : np.ndarray , ):
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform ).unsqueeze(0 )
        features = ta_kaldi.fbank(waveform , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()
    @staticmethod
    def utterance_cmvn( x : np.ndarray , input_length : int , normalize_means : Optional[bool] = True , normalize_vars : Optional[bool] = True , padding_value : float = 0.0 , ):
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self : Tuple , input_features : List[np.ndarray] , attention_mask : Optional[np.ndarray] = None ):
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x , n , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(input_features , lengths )
        ]
    def __call__( self : str , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , padding : Union[bool, str, PaddingStrategy] = False , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , sampling_rate : Optional[int] = None , return_attention_mask : Optional[bool] = None , **kwargs , ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform ) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" )
        if isinstance(input_features[0] , list ):
            padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
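# Illustrative sketch (added): utterance_cmvn above standardizes each feature dimension
# over the un-padded frames. A minimal numpy demonstration of the same statistics,
# assuming numpy is installed; the _demo_ name is hypothetical.
def _demo_cmvn():
    import numpy as np

    features = np.random.randn(100, 80).astype(np.float32)  # (frames, num_mel_bins)
    normalized = (features - features.mean(axis=0)) / features.std(axis=0)
    assert np.allclose(normalized.mean(axis=0), 0.0, atol=1e-4)
    assert np.allclose(normalized.std(axis=0), 1.0, atol=1e-4)
    return normalized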
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : List[str] , _snake_case : List[Any] ):
return FSMTTokenizer.from_pretrained(_snake_case )
    def snake_case_ ( self : Any , _snake_case : List[str] ):
        model = FSMTForConditionalGeneration.from_pretrained(_snake_case ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['''en-ru''', 26.0],
['''ru-en''', 22.0],
['''en-de''', 22.0],
['''de-en''', 29.0],
] )
@slow
    def snake_case_ ( self : Tuple , pair : str , min_bleu_score : float ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = F'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]['''src''']
        tgt_sentences = bleu_data[pair]['''tgt''']
        batch = tokenizer(src_sentences , return_tensors='''pt''' , truncation=True , padding='''longest''' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['''bleu'''] , min_bleu_score )
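# Illustrative sketch (added): calculate_bleu is imported from this repo's test utils,
# whose implementation is not shown here. A hedged stand-in with the same output shape
# could be built on sacrebleu (an assumption, not necessarily the repo's actual helper):
def _demo_calculate_bleu(output_lns, refs_lns):
    import sacrebleu  # assumed available

    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}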
import warnings
from .generation import TFGenerationMixin
class SCREAMING_SNAKE_CASE ( TFGenerationMixin ):
    """simple docstring"""
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , FutureWarning , )
def mf_knapsack( i , wt , val , j ):
    '''simple docstring'''
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1 , wt , val , j )
        else:
            val_ = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val_
    return f[i][j]
def knapsack( w , wt , val , n ):
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution( w , wt , val ):
    '''simple docstring'''
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            '''Both the weights and values vectors must be either lists or tuples''' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            '''The number of weights must be the same as the number of values.\n'''
            f"""But got {num_items} weights and {len(val )} values"""
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                '''All weights must be integers but got weight of '''
                f"""type {type(wt[i] )} at index {i}"""
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution( dp , wt , i , j , optimal_set ):
    '''simple docstring'''
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('optimal_value = ', optimal_solution)
    print('An optimal subset corresponding to the optimal value', optimal_subset)
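# Illustrative sketch (added): the same optimum can be computed top-down without the
# hand-managed global table by memoizing on (i, j); a hedged alternative, not part of
# the original module.
def _demo_knapsack_memoized(w, wt, val):
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def best(i, j):
        if i == 0 or j == 0:
            return 0
        if wt[i - 1] > j:
            return best(i - 1, j)
        return max(best(i - 1, j), best(i - 1, j - wt[i - 1]) + val[i - 1])

    return best(len(wt), w)  # _demo_knapsack_memoized(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8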
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq( number : int ):
    sq = int(number**0.5 )
    return number == sq * sq
def add_three( x_num : int , x_den : int , y_num : int , y_den : int , z_num : int , z_den : int ):
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution( order : int = 35 ):
    unique_s = set()
    total = Fraction(0 )
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'{solution() = }')
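# Illustrative sketch (added): add_three reduces the three-term sum by the gcd; the same
# normalization falls out of Fraction arithmetic directly, which is a handy cross-check.
def _demo_add_three_check():
    from fractions import Fraction

    x, y, z = Fraction(1, 2), Fraction(1, 3), Fraction(1, 6)
    total = x + y + z  # Fraction auto-reduces: 1/2 + 1/3 + 1/6 == 1
    assert (total.numerator, total.denominator) == (1, 1)
    return total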
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self: Dict ) -> Union[str, Any]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , False )
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
pass
def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def lowerCAmelCase_ ( self: Tuple ) -> List[str]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
def lowerCAmelCase_ ( self: str ) -> Any:
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            target = json.loads(f.read() )
        target = {'image_id': 3_97_69, 'annotations': target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
        encoding = image_processing(images=image , annotations=target , return_tensors='pt' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['pixel_values'].shape , expected_shape )
        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_shape )
        expected_slice = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id ) )
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_iscrowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size ) )
@slow
def lowerCAmelCase_ ( self: List[Any] ) -> Dict:
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            target = json.loads(f.read() )
        target = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        image_processing = ConditionalDetrImageProcessor(format='coco_panoptic' )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors='pt' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['pixel_values'].shape , expected_shape )
        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_shape )
        expected_slice = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id ) )
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_iscrowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size ) )
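# Illustrative sketch (added): get_expected_values above mirrors the processor's
# aspect-ratio-preserving resize. The core arithmetic, standalone and hedged; the
# _demo_ name is hypothetical.
def _demo_shortest_edge_resize(height, width, shortest_edge=18):
    # scale so the shorter side hits shortest_edge, keeping the aspect ratio
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

# e.g. _demo_shortest_edge_resize(400, 200) -> (36, 18)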
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
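# Illustrative sketch (added): _LazyModule defers the heavy imports above until first
# attribute access. The mechanism, reduced to its core with importlib; this is a hedged
# approximation, not transformers' actual implementation.
import importlib
import types


class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: module for module, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # import the submodule only when the attribute is actually requested
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)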
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload : bytes , sampling_rate : int ) -> np.array:
    '''simple docstring'''
    ar = f"""{sampling_rate}"""
    ac = """1"""
    format_for_conversion = """f32le"""
    ffmpeg_command = [
        """ffmpeg""",
        """-i""",
        """pipe:0""",
        """-ac""",
        ac,
        """-ar""",
        ar,
        """-f""",
        format_for_conversion,
        """-hide_banner""",
        """-loglevel""",
        """quiet""",
        """pipe:1""",
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError("""Malformed soundfile""" )
    return audio
def ffmpeg_microphone( sampling_rate : int , chunk_length_s : float , format_for_conversion : str = "f32le" , ) -> Optional[Any]:
    '''simple docstring'''
    ar = f"""{sampling_rate}"""
    ac = """1"""
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    system = platform.system()
    if system == "Linux":
        format_ = """alsa"""
        input_ = """default"""
    elif system == "Darwin":
        format_ = """avfoundation"""
        input_ = """:0"""
    elif system == "Windows":
        format_ = """dshow"""
        input_ = """default"""
    ffmpeg_command = [
        """ffmpeg""",
        """-f""",
        format_,
        """-i""",
        input_,
        """-ac""",
        ac,
        """-ar""",
        ar,
        """-f""",
        format_for_conversion,
        """-fflags""",
        """nobuffer""",
        """-hide_banner""",
        """-loglevel""",
        """quiet""",
        """pipe:1""",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
    for item in iterator:
        yield item
def ffmpeg_microphone_live( sampling_rate : int , chunk_length_s : float , stream_chunk_s : Optional[int] = None , stride_length_s : Optional[Union[Tuple[float, float], float]] = None , format_for_conversion : str = "f32le" , ) -> Optional[Any]:
    '''simple docstring'''
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item["""raw"""] = np.frombuffer(item["""raw"""] , dtype=dtype )
        item["""stride"""] = (
            item["""stride"""][0] // size_of_sample,
            item["""stride"""][1] // size_of_sample,
        )
        item["""sampling_rate"""] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter( iterator , chunk_len : int , stride : Tuple[int, int] , stream : bool = False ) -> List[Any]:
    '''simple docstring'''
    acc = b""""""
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"""raw""": acc[:chunk_len], """stride""": stride}
                if stream:
                    item["""partial"""] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {"""raw""": acc, """stride""": (_stride_left, 0)}
        if stream:
            item["""partial"""] = False
        yield item
def _ffmpeg_stream( ffmpeg_command , buflen : int ) -> List[str]:
    '''simple docstring'''
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
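# Illustrative sketch (added): chunk_bytes_iter emits fixed-size windows that overlap by
# (stride_left, stride_right) bytes. The same windowing on a toy byte string, standalone
# and hedged; the _demo_ name is hypothetical.
def _demo_overlapping_chunks():
    data = bytes(range(10))
    chunk_len, stride_left, stride_right = 4, 1, 1
    step = chunk_len - stride_left - stride_right  # advance 2 bytes per chunk
    chunks = [data[i : i + chunk_len] for i in range(0, len(data) - chunk_len + 1, step)]
    return chunks  # [b'\x00\x01\x02\x03', b'\x02\x03\x04\x05', b'\x04\x05\x06\x07', b'\x06\x07\x08\t']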
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class snake_case__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self : List[str] ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 10_02 )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
    @cached_property
    def big_tokenizer( self : List[str] ):
        '''simple docstring'''
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 3_53_78, 66_61, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase : Dict = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class snake_case__ :
@staticmethod
def __lowerCAmelCase ( *lowercase : Any , **lowercase : str ):
'''simple docstring'''
pass
def hashimage ( _lowercase : Image ):
    '''simple docstring'''
    m = hashlib.md5(_lowercase.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable ( _lowercase : Image ):
    '''simple docstring'''
    npimg = np.array(_lowercase )
    shape = npimg.shape
    return {"hash": hashimage(_lowercase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
SCREAMING_SNAKE_CASE__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __lowerCAmelCase ( self : List[Any] , lowercase : List[str] , lowercase : Dict , lowercase : str ):
'''simple docstring'''
        image_segmenter = MaskGenerationPipeline(model=lowercase , image_processor=lowercase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCAmelCase ( self : Optional[Any] , lowercase : Dict , lowercase : Dict ):
'''simple docstring'''
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
pass
@slow
@require_torch
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
        image_segmenter = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=2_56 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0_2_1},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0_0_5_3},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_80, 6_40)}, "scores": 0.9_9_6_7},
{"mask": {"hash": "453c7844bd", "shape": (4_80, 6_40)}, "scores": 0.9_9_3},
{"mask": {"hash": "3d44f2926d", "shape": (4_80, 6_40)}, "scores": 0.9_9_0_9},
{"mask": {"hash": "64033ddc3f", "shape": (4_80, 6_40)}, "scores": 0.9_8_7_9},
{"mask": {"hash": "801064ff79", "shape": (4_80, 6_40)}, "scores": 0.9_8_3_4},
{"mask": {"hash": "6172f276ef", "shape": (4_80, 6_40)}, "scores": 0.9_7_1_6},
{"mask": {"hash": "b49e60e084", "shape": (4_80, 6_40)}, "scores": 0.9_6_1_2},
{"mask": {"hash": "a811e775fd", "shape": (4_80, 6_40)}, "scores": 0.9_5_9_9},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_80, 6_40)}, "scores": 0.9_5_5_2},
{"mask": {"hash": "9d8257e080", "shape": (4_80, 6_40)}, "scores": 0.9_5_3_2},
{"mask": {"hash": "32de6454a8", "shape": (4_80, 6_40)}, "scores": 0.9_5_1_6},
{"mask": {"hash": "af3d4af2c8", "shape": (4_80, 6_40)}, "scores": 0.9_4_9_9},
{"mask": {"hash": "3c6db475fb", "shape": (4_80, 6_40)}, "scores": 0.9_4_8_3},
{"mask": {"hash": "c290813fb9", "shape": (4_80, 6_40)}, "scores": 0.9_4_6_4},
{"mask": {"hash": "b6f0b8f606", "shape": (4_80, 6_40)}, "scores": 0.9_4_3},
{"mask": {"hash": "92ce16bfdf", "shape": (4_80, 6_40)}, "scores": 0.9_4_3},
{"mask": {"hash": "c749b25868", "shape": (4_80, 6_40)}, "scores": 0.9_4_0_8},
{"mask": {"hash": "efb6cab859", "shape": (4_80, 6_40)}, "scores": 0.9_3_3_5},
{"mask": {"hash": "1ff2eafb30", "shape": (4_80, 6_40)}, "scores": 0.9_3_2_6},
{"mask": {"hash": "788b798e24", "shape": (4_80, 6_40)}, "scores": 0.9_2_6_2},
{"mask": {"hash": "abea804f0e", "shape": (4_80, 6_40)}, "scores": 0.8_9_9_9},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_80, 6_40)}, "scores": 0.8_9_8_6},
{"mask": {"hash": "cd24047c8a", "shape": (4_80, 6_40)}, "scores": 0.8_9_8_4},
{"mask": {"hash": "6943e6bcbd", "shape": (4_80, 6_40)}, "scores": 0.8_8_7_3},
{"mask": {"hash": "b5f47c9191", "shape": (4_80, 6_40)}, "scores": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase : List[Any] = "facebook/sam-vit-huge"
UpperCAmelCase : Optional[int] = pipeline("mask-generation" , model=lowercase )
UpperCAmelCase : Dict = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
UpperCAmelCase : str = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(lowercase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0_2_1_0},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0_0_5_3},
] , )
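# Hedged usage sketch of the pipeline exercised above; the checkpoint and URL are
# taken from the tests, but the snippet itself is illustrative, not part of the
# original test suite:
#
#     segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
#     outputs = segmenter(
#         "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256
#     )
#     outputs["masks"][0]   # a binary mask array
#     outputs["scores"][0]  # its predicted IoU score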
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class lowercase ( __lowerCamelCase ):
'''simple docstring'''
def a__ ( self : str ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def a__ ( self : Dict ) -> Dict:
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def a__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def a__ ( self : List[Any] ) -> Any:
'''simple docstring'''
lowerCamelCase__ = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def a__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCamelCase__ = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def a__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def a__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def a__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def a__ ( self : Dict ) -> List[str]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCamelCase__ = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def a__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def a__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def a__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
import PIL.Image
        pil_image = PIL.Image.fromarray(np.arange(10 , dtype=np.uint8 ).reshape(2 , 5 ) )
        with patch(
            "datasets.arrow_writer.cast_to_python_objects" , side_effect=cast_to_python_objects ) as mock_cast_to_python_objects:
            lowerCamelCase__ = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
            args , kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting" , kwargs )
            self.assertFalse(kwargs["optimize_list_casting"] )
def _check_output(output , expected_num_chunks):
    stream = pa.BufferReader(output) if isinstance(output , pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10])
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}])
def lowerCamelCase_ ( lowercase__ , lowercase__):
lowerCamelCase__ = pa.BufferOutputStream()
lowerCamelCase__ = pa.schema(__a) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a) as writer:
writer.write({"col_1": "foo", "col_2": 1})
writer.write({"col_1": "bar", "col_2": 2})
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase__ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def lowerCamelCase_ ( ):
lowerCamelCase__ = pa.BufferOutputStream()
lowerCamelCase__ = Features({"labels": ClassLabel(names=["neg", "pos"])})
with ArrowWriter(stream=__a , features=__a) as writer:
writer.write({"labels": 0})
writer.write({"labels": 1})
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
lowerCamelCase__ = pa.BufferReader(output.getvalue())
lowerCamelCase__ = pa.ipc.open_stream(__a)
lowerCamelCase__ = f.read_all()
lowerCamelCase__ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__a)
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10])
def lowerCamelCase_ ( lowercase__):
lowerCamelCase__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
with pytest.raises(__a):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2])
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10])
def lowerCamelCase_ ( lowercase__):
lowerCamelCase__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
with pytest.raises(__a):
writer.write({"col_1": "foo", "col_2": 1} , key=10)
writer.write({"col_1": "bar", "col_2": 2} , key=10)
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10])
def lowerCamelCase_ ( lowercase__):
lowerCamelCase__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1)
writer.write({"col_1": "bar", "col_2": 2} , key=2)
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10])
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}])
def lowerCamelCase_ ( lowercase__ , lowercase__):
lowerCamelCase__ = pa.BufferOutputStream()
lowerCamelCase__ = pa.schema(__a) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
writer.write_batch({"col_1": [], "col_2": []})
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase__ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10])
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}])
def lowerCamelCase_ ( lowercase__ , lowercase__):
lowerCamelCase__ = pa.BufferOutputStream()
lowerCamelCase__ = pa.schema(__a) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase__ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10])
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}])
def lowerCamelCase_ ( lowercase__ , lowercase__):
lowerCamelCase__ = pa.BufferOutputStream()
lowerCamelCase__ = pa.schema(__a) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase__ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def lowerCamelCase_ ( ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = {"col_1": pa.string(), "col_2": pa.intaa()}
lowerCamelCase__ = os.path.join(__a , "test.arrow")
with ArrowWriter(path=__a , schema=pa.schema(__a)) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata)
_check_output(__a , 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst , value):
    if isinstance(lst[0] , list):
        change_first_primitive_element_in_list(lst[0] , value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32"), pa.intaa())])
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__):
lowerCamelCase__ = pa.array(TypedSequence(__a , optimized_int_type=__a))
assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__):
lowerCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a))
assert get_base_dtype(arr.type) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
lowerCamelCase__ = copy.deepcopy(__a)
lowerCamelCase__ = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
change_first_primitive_element_in_list(__a , __a)
lowerCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a))
assert get_base_dtype(arr.type) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True])
def lowerCamelCase_ (raise_exception , tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def lowerCamelCase_ (mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path , storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs , type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1})
writer.write({"col_1": "bar", "col_2": 2})
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
    assert mockfs.exists(path)
def lowerCamelCase_ ( ):
lowerCamelCase__ = pa.BufferOutputStream()
with ParquetWriter(stream=__a) as writer:
writer.write({"col_1": "foo", "col_2": 1})
writer.write({"col_1": "bar", "col_2": 2})
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
lowerCamelCase__ = pa.BufferReader(output.getvalue())
lowerCamelCase__ = pq.read_table(__a)
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True])
def lowerCamelCase_ (tmp_path , embed_local_files):
    import PIL.Image
    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uint8)).save(image_path , format="png")
lowerCamelCase__ = pa.BufferOutputStream()
with ParquetWriter(
stream=__a , features=Features({"image": Image()}) , embed_local_files=__a) as writer:
writer.write({"image": image_path})
writer.finalize()
lowerCamelCase__ = pa.BufferReader(output.getvalue())
lowerCamelCase__ = pq.read_table(__a)
lowerCamelCase__ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , __a)
with open(__a , "rb") as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def lowerCamelCase_ ( ):
lowerCamelCase__ = pa.schema([pa.field("col_1" , pa.string() , nullable=__a)])
lowerCamelCase__ = pa.BufferOutputStream()
with ArrowWriter(stream=__a) as writer:
writer._build_writer(inferred_schema=__a)
assert writer._schema == pa.schema([pa.field("col_1" , pa.string())])
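# A minimal, self-contained ArrowWriter sketch distilled from the tests above; the
# in-memory stream and rows are illustrative, not part of the original test suite.
if __name__ == "__main__":
    demo_stream = pa.BufferOutputStream()
    with ArrowWriter(stream=demo_stream) as demo_writer:
        demo_writer.write({"col_1": "foo", "col_2": 1})
        demo_writer.write({"col_1": "bar", "col_2": 2})
        demo_examples, demo_bytes = demo_writer.finalize()
    # a single batch is written when no writer_batch_size is given
    _check_output(demo_stream.getvalue() , expected_num_chunks=1)
    print(demo_examples , demo_bytes)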
| 720
|
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value , weight , capacity):
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value , weight)]
    index.sort(key=lambda i: ratio[i] , reverse=True)
    max_value = 0
    fractions = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
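    # Illustrative run (inputs assumed, not part of the original file): the
    # value/weight ratios are [6.0, 5.0, 4.0], so items 0 and 1 are taken whole
    # and 2/3 of item 2 fits, giving 60 + 100 + 80 = 240.0.
    print(fractional_knapsack([60, 100, 120] , [10, 20, 30] , 50))  # (240.0, [1, 1, 0.666...])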
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a__ = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''PerceiverFeatureExtractor''']
a__ = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
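# With the lazy module installed in sys.modules, heavy submodule imports are
# deferred to first attribute access; a typical use (assuming torch is
# available) looks like:
#
#     from transformers import PerceiverConfig, PerceiverModel
#     model = PerceiverModel(PerceiverConfig())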
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__lowerCamelCase = logging.get_logger(__name__)
# General docstring
__lowerCamelCase = 'RegNetConfig'
# Base docstring
__lowerCamelCase = 'facebook/regnet-y-040'
__lowerCamelCase = [1, 10_88, 7, 7]
# Image classification docstring
__lowerCamelCase = 'facebook/regnet-y-040'
__lowerCamelCase = 'tabby, tabby cat'
__lowerCamelCase = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( tf.keras.layers.Layer ):
def __init__( self : List[str] , __snake_case : int , __snake_case : int = 3 , __snake_case : int = 1 , __snake_case : int = 1 , __snake_case : Optional[str] = "relu" , **__snake_case : str , ) -> Any:
super().__init__(**__snake_case )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__magic_name__: Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__magic_name__: Dict = tf.keras.layers.ConvaD(
filters=__snake_case , kernel_size=__snake_case , strides=__snake_case , padding="""VALID""" , groups=__snake_case , use_bias=__snake_case , name="""convolution""" , )
__magic_name__: int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
__magic_name__: Optional[int] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCamelCase__ ( self : Optional[int] , __snake_case : str ) -> Dict:
__magic_name__: Optional[Any] = self.convolution(self.padding(__snake_case ) )
__magic_name__: Union[str, Any] = self.normalization(__snake_case )
__magic_name__: Tuple = self.activation(__snake_case )
return hidden_state
class __A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __snake_case : RegNetConfig , **__snake_case : Dict ) -> Optional[int]:
super().__init__(**__snake_case )
__magic_name__: Tuple = config.num_channels
__magic_name__: Optional[int] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def lowerCamelCase__ ( self : List[str] , __snake_case : Dict ) -> int:
__magic_name__: Union[str, Any] = shape_list(__snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__magic_name__: Any = tf.transpose(__snake_case , perm=(0, 2, 3, 1) )
__magic_name__: Dict = self.embedder(__snake_case )
return hidden_state
class __A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int = 2 , **__snake_case : Any ) -> Dict:
super().__init__(**__snake_case )
__magic_name__: Union[str, Any] = tf.keras.layers.ConvaD(
filters=__snake_case , kernel_size=1 , strides=__snake_case , use_bias=__snake_case , name="""convolution""" )
__magic_name__: Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : tf.Tensor , __snake_case : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(__snake_case ) , training=__snake_case )
class __A ( tf.keras.layers.Layer ):
def __init__( self : int , __snake_case : int , __snake_case : int , **__snake_case : str ) -> str:
super().__init__(**__snake_case )
__magic_name__: Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__snake_case , name="""pooler""" )
__magic_name__: Optional[Any] = [
tf.keras.layers.ConvaD(filters=__snake_case , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=__snake_case , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def lowerCamelCase__ ( self : Dict , __snake_case : List[str] ) -> List[Any]:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__magic_name__: List[str] = self.pooler(__snake_case )
for layer_module in self.attention:
__magic_name__: List[str] = layer_module(__snake_case )
__magic_name__: Optional[Any] = hidden_state * pooled
return hidden_state
class __A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 1 , **__snake_case : Optional[int] ) -> Optional[int]:
super().__init__(**__snake_case )
__magic_name__: List[str] = in_channels != out_channels or stride != 1
__magic_name__: Union[str, Any] = max(1 , out_channels // config.groups_width )
__magic_name__: Optional[Any] = (
TFRegNetShortCut(__snake_case , stride=__snake_case , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__magic_name__: List[str] = [
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
__snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=__snake_case , name="""layer.2""" ),
]
__magic_name__: Any = ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Any ) -> Union[str, Any]:
__magic_name__: Any = hidden_state
for layer_module in self.layers:
__magic_name__: Optional[int] = layer_module(__snake_case )
__magic_name__: str = self.shortcut(__snake_case )
hidden_state += residual
__magic_name__: int = self.activation(__snake_case )
return hidden_state
class __A ( tf.keras.layers.Layer ):
def __init__( self : List[str] , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 1 , **__snake_case : Union[str, Any] ) -> Dict:
super().__init__(**__snake_case )
__magic_name__: str = in_channels != out_channels or stride != 1
__magic_name__: Dict = max(1 , out_channels // config.groups_width )
__magic_name__: Tuple = (
TFRegNetShortCut(__snake_case , stride=__snake_case , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
__magic_name__: str = [
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
__snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(__snake_case , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=__snake_case , name="""layer.3""" ),
]
__magic_name__: Optional[int] = ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self : List[str] , __snake_case : int ) -> Dict:
__magic_name__: int = hidden_state
for layer_module in self.layers:
__magic_name__: Optional[Any] = layer_module(__snake_case )
__magic_name__: Union[str, Any] = self.shortcut(__snake_case )
hidden_state += residual
__magic_name__: Any = self.activation(__snake_case )
return hidden_state
class __A ( tf.keras.layers.Layer ):
def __init__( self : int , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 2 , __snake_case : int = 2 , **__snake_case : List[Any] ) -> Optional[int]:
super().__init__(**__snake_case )
__magic_name__: int = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
__magic_name__: Optional[Any] = [
# downsampling is done in the first layer with stride of 2
layer(__snake_case , __snake_case , __snake_case , stride=__snake_case , name="""layers.0""" ),
*[layer(__snake_case , __snake_case , __snake_case , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def lowerCamelCase__ ( self : int , __snake_case : Union[str, Any] ) -> Tuple:
for layer_module in self.layers:
__magic_name__: Dict = layer_module(__snake_case )
return hidden_state
class __A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __snake_case : RegNetConfig , **__snake_case : Optional[Any] ) -> Dict:
super().__init__(**__snake_case )
__magic_name__: List[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
__magic_name__: Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__snake_case , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__snake_case , __snake_case , __snake_case , depth=__snake_case , name=F'stages.{i+1}' ) )
def lowerCamelCase__ ( self : int , __snake_case : tf.Tensor , __snake_case : bool = False , __snake_case : bool = True ) -> TFBaseModelOutputWithNoAttention:
__magic_name__: int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__magic_name__: Optional[Any] = hidden_states + (hidden_state,)
__magic_name__: Optional[Any] = stage_module(__snake_case )
if output_hidden_states:
__magic_name__: int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__snake_case , hidden_states=__snake_case )
@keras_serializable
class __A ( tf.keras.layers.Layer ):
UpperCAmelCase__ = RegNetConfig
def __init__( self : Optional[int] , __snake_case : Any , **__snake_case : List[str] ) -> int:
super().__init__(**__snake_case )
__magic_name__: Union[str, Any] = config
__magic_name__: Optional[int] = TFRegNetEmbeddings(__snake_case , name="""embedder""" )
__magic_name__: int = TFRegNetEncoder(__snake_case , name="""encoder""" )
__magic_name__: int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__snake_case , name="""pooler""" )
@unpack_inputs
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : tf.Tensor , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__magic_name__: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__magic_name__: int = return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__: List[str] = self.embedder(__snake_case , training=__snake_case )
__magic_name__: Optional[Any] = self.encoder(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case )
__magic_name__: str = encoder_outputs[0]
__magic_name__: List[Any] = self.pooler(__snake_case )
        # Change to NCHW output format to have uniformity in the modules
__magic_name__: int = tf.transpose(__snake_case , perm=(0, 3, 1, 2) )
__magic_name__: List[str] = tf.transpose(__snake_case , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__magic_name__: List[str] = tuple([tf.transpose(__snake_case , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__snake_case , pooler_output=__snake_case , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = RegNetConfig
UpperCAmelCase__ = "regnet"
UpperCAmelCase__ = "pixel_values"
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
__lowerCamelCase = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowerCamelCase = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,SCREAMING_SNAKE_CASE_ ,)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Optional[Any] , __snake_case : RegNetConfig , *__snake_case : List[Any] , **__snake_case : Tuple ) -> Tuple:
super().__init__(__snake_case , *__snake_case , **__snake_case )
__magic_name__: List[str] = TFRegNetMainLayer(__snake_case , name="""regnet""" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCamelCase__ ( self : Dict , __snake_case : tf.Tensor , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : int=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__magic_name__: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__magic_name__: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__: List[str] = self.regnet(
pixel_values=__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,SCREAMING_SNAKE_CASE_ ,)
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
def __init__( self : int , __snake_case : RegNetConfig , *__snake_case : Any , **__snake_case : Any ) -> Optional[Any]:
super().__init__(__snake_case , *__snake_case , **__snake_case )
__magic_name__: Union[str, Any] = config.num_labels
__magic_name__: Tuple = TFRegNetMainLayer(__snake_case , name="""regnet""" )
# classification head
__magic_name__: List[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCamelCase__ ( self : List[str] , __snake_case : tf.Tensor = None , __snake_case : tf.Tensor = None , __snake_case : bool = None , __snake_case : bool = None , __snake_case : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__magic_name__: Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__magic_name__: Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__: Any = self.regnet(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case )
__magic_name__: Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
__magic_name__: Optional[int] = self.classifier[0](__snake_case )
__magic_name__: List[Any] = self.classifier[1](__snake_case )
__magic_name__: Optional[int] = None if labels is None else self.hf_compute_loss(labels=__snake_case , logits=__snake_case )
if not return_dict:
__magic_name__: List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states )
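# Hedged usage sketch; the public transformers names below are assumed to
# correspond to the mangled classes above, and the checkpoint matches
# _IMAGE_CLASS_CHECKPOINT:
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     label_id = int(tf.math.argmax(model(**inputs).logits, axis=-1))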
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class lowercase ( nn.Module ):
def __init__( self ,A__ ,A__ ,A__ ,A__=0.0 ,A__ = None ,A__ = "geglu" ,A__ = None ,A__ = False ,A__ = False ,A__ = False ,A__ = False ,A__ = True ,A__ = "layer_norm" ,A__ = False ,):
super().__init__()
lowercase = only_cross_attention
lowercase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
lowercase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
f' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.')
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowercase = AdaLayerNorm(A__ ,A__)
elif self.use_ada_layer_norm_zero:
lowercase = AdaLayerNormZero(A__ ,A__)
else:
lowercase = nn.LayerNorm(A__ ,elementwise_affine=A__)
lowercase = Attention(
query_dim=A__ ,heads=A__ ,dim_head=A__ ,dropout=A__ ,bias=A__ ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=A__ ,)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowercase = (
AdaLayerNorm(A__ ,A__)
if self.use_ada_layer_norm
else nn.LayerNorm(A__ ,elementwise_affine=A__)
)
lowercase = Attention(
query_dim=A__ ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=A__ ,dim_head=A__ ,dropout=A__ ,bias=A__ ,upcast_attention=A__ ,) # is self-attn if encoder_hidden_states is none
else:
lowercase = None
lowercase = None
# 3. Feed-forward
lowercase = nn.LayerNorm(A__ ,elementwise_affine=A__)
lowercase = FeedForward(A__ ,dropout=A__ ,activation_fn=A__ ,final_dropout=A__)
# let chunk size default to None
lowercase = None
lowercase = 0
def A__ ( self ,A__ ,A__):
# Sets chunk feed-forward
lowercase = chunk_size
lowercase = dim
def A__ ( self ,A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
lowercase = self.norma(A__ ,A__)
elif self.use_ada_layer_norm_zero:
lowercase , lowercase , lowercase , lowercase , lowercase = self.norma(
A__ ,A__ ,A__ ,hidden_dtype=hidden_states.dtype)
else:
lowercase = self.norma(A__)
lowercase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowercase = self.attna(
A__ ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=A__ ,**A__ ,)
if self.use_ada_layer_norm_zero:
lowercase = gate_msa.unsqueeze(1) * attn_output
lowercase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowercase = (
self.norma(A__ ,A__) if self.use_ada_layer_norm else self.norma(A__)
)
lowercase = self.attna(
A__ ,encoder_hidden_states=A__ ,attention_mask=A__ ,**A__ ,)
lowercase = attn_output + hidden_states
# 3. Feed-forward
lowercase = self.norma(A__)
if self.use_ada_layer_norm_zero:
lowercase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.')
lowercase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowercase = torch.cat(
[self.ff(A__) for hid_slice in norm_hidden_states.chunk(A__ ,dim=self._chunk_dim)] ,dim=self._chunk_dim ,)
else:
lowercase = self.ff(A__)
if self.use_ada_layer_norm_zero:
lowercase = gate_mlp.unsqueeze(1) * ff_output
lowercase = ff_output + hidden_states
return hidden_states
class lowercase ( nn.Module ):
def __init__( self ,A__ ,A__ = None ,A__ = 4 ,A__ = 0.0 ,A__ = "geglu" ,A__ = False ,):
super().__init__()
lowercase = int(dim * mult)
lowercase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowercase = GELU(A__ ,A__)
if activation_fn == "gelu-approximate":
lowercase = GELU(A__ ,A__ ,approximate='''tanh''')
elif activation_fn == "geglu":
lowercase = GEGLU(A__ ,A__)
elif activation_fn == "geglu-approximate":
lowercase = ApproximateGELU(A__ ,A__)
lowercase = nn.ModuleList([])
# project in
self.net.append(A__)
# project dropout
self.net.append(nn.Dropout(A__))
# project out
self.net.append(nn.Linear(A__ ,A__))
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(A__))
def A__ ( self ,A__):
for module in self.net:
lowercase = module(A__)
return hidden_states
class lowercase ( nn.Module ):
def __init__( self ,A__ ,A__ ,A__ = "none"):
super().__init__()
lowercase = nn.Linear(A__ ,A__)
lowercase = approximate
def A__ ( self ,A__):
if gate.device.type != "mps":
return F.gelu(A__ ,approximate=self.approximate)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa) ,approximate=self.approximate).to(dtype=gate.dtype)
def A__ ( self ,A__):
lowercase = self.proj(A__)
lowercase = self.gelu(A__)
return hidden_states
class lowercase ( nn.Module ):
def __init__( self ,A__ ,A__):
super().__init__()
lowercase = nn.Linear(A__ ,dim_out * 2)
def A__ ( self ,A__):
if gate.device.type != "mps":
return F.gelu(A__)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa)).to(dtype=gate.dtype)
def A__ ( self ,A__):
lowercase , lowercase = self.proj(A__).chunk(2 ,dim=-1)
return hidden_states * self.gelu(A__)
class lowercase ( nn.Module ):
def __init__( self ,A__ ,A__):
super().__init__()
lowercase = nn.Linear(A__ ,A__)
def A__ ( self ,A__):
lowercase = self.proj(A__)
return x * torch.sigmoid(1.702 * x)
class lowercase ( nn.Module ):
def __init__( self ,A__ ,A__):
super().__init__()
lowercase = nn.Embedding(A__ ,A__)
lowercase = nn.SiLU()
lowercase = nn.Linear(A__ ,embedding_dim * 2)
lowercase = nn.LayerNorm(A__ ,elementwise_affine=A__)
def A__ ( self ,A__ ,A__):
lowercase = self.linear(self.silu(self.emb(A__)))
lowercase , lowercase = torch.chunk(A__ ,2)
lowercase = self.norm(A__) * (1 + scale) + shift
return x
class lowercase ( nn.Module ):
def __init__( self ,A__ ,A__):
super().__init__()
lowercase = CombinedTimestepLabelEmbeddings(A__ ,A__)
lowercase = nn.SiLU()
lowercase = nn.Linear(A__ ,6 * embedding_dim ,bias=A__)
lowercase = nn.LayerNorm(A__ ,elementwise_affine=A__ ,eps=1E-6)
def A__ ( self ,A__ ,A__ ,A__ ,A__=None):
lowercase = self.linear(self.silu(self.emb(A__ ,A__ ,hidden_dtype=A__)))
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = emb.chunk(6 ,dim=1)
lowercase = self.norm(A__) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class lowercase ( nn.Module ):
def __init__( self ,A__ ,A__ ,A__ ,A__ = None ,A__ = 1E-5):
super().__init__()
lowercase = num_groups
lowercase = eps
if act_fn is None:
lowercase = None
else:
lowercase = get_activation(A__)
lowercase = nn.Linear(A__ ,out_dim * 2)
def A__ ( self ,A__ ,A__):
if self.act:
lowercase = self.act(A__)
lowercase = self.linear(A__)
lowercase = emb[:, :, None, None]
lowercase , lowercase = emb.chunk(2 ,dim=1)
lowercase = F.group_norm(A__ ,self.num_groups ,eps=self.eps)
lowercase = x * (1 + scale) + shift
return x
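# Sketch of the chunked feed-forward path in the transformer block's forward
# above (names are illustrative): slicing along the chunk dimension bounds the
# peak activation memory of the feed-forward at the cost of extra kernel
# launches.
#
#     num_chunks = norm_hidden_states.shape[chunk_dim] // chunk_size
#     ff_output = torch.cat(
#         [ff(piece) for piece in norm_hidden_states.chunk(num_chunks, dim=chunk_dim)],
#         dim=chunk_dim,
#     )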
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
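# The replacement suggested by the warning (the checkpoint name below is
# illustrative):
#
#     from diffusers import StableDiffusionInpaintPipeline
#     pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")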
def UpperCAmelCase ( txt ) -> list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
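# Illustrative behaviour (input assumed, not from the original file):
# UpperCAmelCase("ab1c") -> ["Ab1c", "aB1c", "ab1C"]  (non-alphabetic characters are skipped)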
if __name__ == "__main__":
__import__('doctest').testmod()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase_ ( a_ ):
_A : Optional[int] = 'facebook/bart-large-mnli'
_A : Union[str, Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
_A : Dict = 'text_classifier'
_A : Union[str, Any] = AutoTokenizer
_A : Tuple = AutoModelForSequenceClassification
_A : Optional[int] = ['text', ['text']]
_A : Dict = ['text']
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
super().setup()
UpperCAmelCase = self.model.config
UpperCAmelCase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
UpperCAmelCase = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [f'''This example is {label}''' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def UpperCamelCase_ ( self , snake_case__ ) -> str:
"""simple docstring"""
UpperCAmelCase = outputs.logits
UpperCAmelCase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
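# Hedged usage sketch: PipelineTool subclasses are callable once instantiated;
# the input text and labels below are illustrative, and the no-argument
# constructor assumes the default checkpoint declared above can be downloaded.
#
#     classifier = UpperCamelCase_()
#     classifier("I really enjoyed this film", labels=["positive", "negative"])
#     # -> "positive"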
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__A = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "sequence-classification"
def __init__( self: str , __A: Union[str, Any] ) -> List[str]:
if type(__A ) == dict:
_A = Namespace(**__A )
_A = glue_output_modes[hparams.task]
_A = glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]:
return self.model(**__A )
def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]:
_A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_A = self(**__A )
_A = outputs[0]
_A = self.trainer.lr_schedulers[0]['''scheduler''']
_A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __A ( self: List[str] ) -> Dict:
_A = self.hparams
_A = processors[args.task]()
_A = processor.get_labels()
for mode in ["train", "dev"]:
_A = self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __A )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
_A = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
_A = convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , __A )
torch.save(__A , __A )
def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader:
_A = '''dev''' if mode == '''test''' else mode
_A = self._feature_file(__A )
logger.info('''Loading features from cached file %s''' , __A )
_A = torch.load(__A )
_A = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_A = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_A = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def __A ( self: List[str] , __A: str , __A: Tuple ) -> str:
_A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_A = self(**__A )
_A ,_A = outputs[:2]
_A = logits.detach().cpu().numpy()
_A = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __A ( self: str , __A: Dict ) -> tuple:
_A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
_A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_A = np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_A = np.squeeze(__A )
_A = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
_A = [[] for _ in range(out_label_ids.shape[0] )]
_A = [[] for _ in range(out_label_ids.shape[0] )]
_A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_A = dict(results.items() )
_A = results
return ret, preds_list, out_label_list
def __A ( self: Any , __A: list ) -> dict:
_A ,_A ,_A = self._eval_end(__A )
_A = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __A ( self: int , __A: Union[str, Any] ) -> dict:
_A ,_A ,_A = self._eval_end(__A )
_A = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args( parser: Optional[Any] , root_dir: Optional[Any] ) -> Optional[Any]:
        BaseTransformer.add_model_specific_args(parser , root_dir )
parser.add_argument(
'''--max_seq_length''' , default=1_28 , type=__A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            '''./results''' , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}" , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
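# A hypothetical invocation of this script (the generic flags such as --model_name_or_path,
# --output_dir and --do_predict come from the shared lightning helpers imported above, so
# their exact names are an assumption here; the script filename is also illustrative):
#   python run_pl_glue.py --task mrpc --model_name_or_path bert-base-cased \
#       --max_seq_length 128 --gpus 1 --do_predict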
| 62
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__A = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]:
super().__init__(*__A , **__A )
_A = eval_examples
_A = post_process_function
_A = quant_trainer_args
_A = 1_28 # default number of calibration samples
def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]:
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
_A = calib_dataset if calib_dataset is not None else self.calib_dataset
_A = self._remove_unused_columns(__A , description='''Calibration''' )
return DataLoader(
__A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , )
def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]:
_A = self.train_dataset if calib_dataset is None else calib_dataset
_A = self.get_calib_dataloader(__A )
_A = self.model
quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A )
model.eval()
quant_trainer.enable_calibration(__A )
logger.info('''***** Running calibration *****''' )
logger.info(f""" Num examples = {self.calib_num}""" )
logger.info(f""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(__A ):
# Prediction step
_A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__A , self.quant_trainer_args )
_A = model
def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int:
_A = self.eval_dataset if eval_dataset is None else eval_dataset
_A = self.get_eval_dataloader(__A )
_A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A = eval_loop(
__A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
_A = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_A = self.post_process_function(__A , __A , output.predictions )
_A = self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
_A = metrics.pop(__A )
self.log(__A )
else:
_A = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A )
return metrics
def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]:
_A = self.get_test_dataloader(__A )
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A = eval_loop(
__A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
_A = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_A = self.post_process_function(__A , __A , output.predictions , '''predict''' )
_A = self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
_A = metrics.pop(__A )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A )
def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]:
_A = self.eval_dataset
_A = self.get_eval_dataloader(__A )
_A = next(iter(__A ) )
# saving device - to make it consistent
_A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
_A = tuple(v.to(__A ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
_A = True
_A = self.model.to(__A )
model.eval()
model.float()
_A = model.module if hasattr(__A , '''module''' ) else model
quant_trainer.configure_model(__A , self.quant_trainer_args )
_A = os.path.join(__A , '''model.onnx''' )
logger.info(f"""exporting model to {output_model_file}""" )
_A = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__A , )
logger.info('''onnx export finished''' )
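# A minimal wiring sketch (every name below is an assumption: the class and its methods are
# name-mangled above, so this uses the names of the unobfuscated quantized-QA trainer that
# this code mirrors):
#   trainer = QuestionAnsweringTrainer(
#       model=model, args=training_args, train_dataset=train_ds, eval_dataset=eval_ds,
#       eval_examples=raw_eval_examples, post_process_function=post_processing_function,
#       quant_trainer_args=quant_args,
#   )
#   trainer.calibrate()   # run post-training-quantization calibration on the train set
#   trainer.evaluate()    # evaluation with QA-style post-processing
#   trainer.save_onnx()   # export the calibrated model to ONNX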
| 62
| 1
|
'''simple docstring'''
import cmath
import math
def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    """Calculate the apparent power in a single-phase AC circuit."""
    # Convert the phase angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
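    # Worked example (illustrative values): a 100 V phasor at 30 degrees times a 4 A phasor
    # at -10 degrees gives an apparent power of 400 VA at 20 degrees, i.e. ~375.88 + 136.81j.
    print(f"{apparent_power(100, 4, 30, -10) = }")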
| 667
|
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def a ( ):
'''simple docstring'''
A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def a ( ):
'''simple docstring'''
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def a ( ):
'''simple docstring'''
A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a ( ):
'''simple docstring'''
A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ : List[Any] = canny.canny(lowerCamelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def a ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all()
def a ( ):
'''simple docstring'''
A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ )
assert res.any()
def a ( ):
'''simple docstring'''
assert med.median_filter(lowerCamelCase__ , 3 ).any()
def a ( ):
'''simple docstring'''
A_, A_ : int = sob.sobel_filter(lowerCamelCase__ )
assert grad.any() and theta.any()
def a ( ):
'''simple docstring'''
A_ : int = sp.make_sepia(lowerCamelCase__ , 20 )
assert sepia.all()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def a ( ):
'''simple docstring'''
A_ : int = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 )
# Test for get_neighbors_pixel function() return not None
A_ : str = 0
A_ : str = 0
A_ : Dict = image[x_coordinate][y_coordinate]
A_ : Optional[Any] = lbp.get_neighbors_pixel(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert lbp_image.any()
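# The functions above are written as pytest-style tests; a typical (assumed) invocation,
# with the path adjusted to wherever this file actually lives:
#   python -m pytest -v digital_image_processing/test_digital_image_processing.py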
| 667
| 1
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
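# A minimal usage sketch (illustrative values): stop generation after 20 tokens or after
# 2.0 seconds of wall-clock time, whichever comes first.
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=2.0)])
#   model.generate(input_ids, stopping_criteria=criteria)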
| 709
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
UpperCamelCase_ = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> Optional[int]:
if isinstance(snake_case__ , torch.Tensor ):
return image
elif isinstance(snake_case__ , PIL.Image.Image ):
__UpperCAmelCase =[image]
__UpperCAmelCase =[trans(img.convert('''RGB''' ) ) for img in image]
__UpperCAmelCase =torch.stack(snake_case__ )
return image
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
def __init__(self , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
__UpperCAmelCase =DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase)
def A__ (self , UpperCAmelCase):
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f"""The value of strength should in [0.0, 1.0] but is {strength}""")
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =min(int(num_inference_steps * strength) , UpperCAmelCase)
__UpperCAmelCase =max(num_inference_steps - init_timestep , 0)
__UpperCAmelCase =self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None):
'''simple docstring'''
if not isinstance(UpperCAmelCase , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase)}""")
__UpperCAmelCase =image.to(device=UpperCAmelCase , dtype=UpperCAmelCase)
if isinstance(UpperCAmelCase , UpperCAmelCase) and len(UpperCAmelCase) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(UpperCAmelCase)}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
__UpperCAmelCase =init_latents.shape
__UpperCAmelCase =randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase , dtype=UpperCAmelCase)
# get latents
print('''add noise to latents at timestep''' , UpperCAmelCase)
__UpperCAmelCase =self.scheduler.add_noise(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
__UpperCAmelCase =init_latents
return latents
@torch.no_grad()
def __call__(self , UpperCAmelCase = None , UpperCAmelCase = 0.8 , UpperCAmelCase = 1 , UpperCAmelCase = None , UpperCAmelCase = 0.0 , UpperCAmelCase = 5_0 , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , ):
'''simple docstring'''
self.check_inputs(UpperCAmelCase)
# 2. Preprocess image
__UpperCAmelCase =preprocess(UpperCAmelCase)
# 3. set timesteps
self.scheduler.set_timesteps(UpperCAmelCase , device=self.device)
__UpperCAmelCase , __UpperCAmelCase =self.get_timesteps(UpperCAmelCase , UpperCAmelCase , self.device)
__UpperCAmelCase =timesteps[:1].repeat(UpperCAmelCase)
# 4. Prepare latent variables
__UpperCAmelCase =self.prepare_latents(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , self.unet.dtype , self.device , UpperCAmelCase)
__UpperCAmelCase =latents
# 5. Denoising loop
for t in self.progress_bar(UpperCAmelCase):
# 1. predict noise model_output
__UpperCAmelCase =self.unet(UpperCAmelCase , UpperCAmelCase).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__UpperCAmelCase =self.scheduler.step(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , eta=UpperCAmelCase , use_clipped_model_output=UpperCAmelCase , generator=UpperCAmelCase , ).prev_sample
__UpperCAmelCase =(image / 2 + 0.5).clamp(0 , 1)
__UpperCAmelCase =image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__UpperCAmelCase =self.numpy_to_pil(UpperCAmelCase)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=UpperCAmelCase)
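# A hypothetical usage sketch (the pipeline class above is name-mangled; `ImgToImgPipeline`
# stands in for whatever it is actually called, and `unet`, `scheduler` and `init_image` are
# assumed to come from a pretrained checkpoint):
#   pipe = ImgToImgPipeline(unet=unet, scheduler=scheduler)
#   image, noise_timestep = pipe(init_image, strength=0.75, num_inference_steps=50, return_dict=False)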
| 142
| 0
|
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid for this vertex when no already-colored neighbour uses it
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
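if __name__ == "__main__":
    # Small usage example (illustrative): a triangle graph, where every vertex is adjacent
    # to the other two, needs three colors; with only two colors no assignment exists.
    triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    print(color(triangle, 3))  # [0, 1, 2]
    print(color(triangle, 2))  # []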
| 691
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
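# With this lazy-module pattern the heavy tokenization module is only loaded on first
# attribute access; at top level the tokenizer is also re-exported, so the usual import is:
#   from transformers import TapexTokenizer
# (the exact package path of this file varies across transformers versions).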
| 691
| 1
|
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """A partition count n is "perfect" when 4*n + 1 is the square of a number of the form 2**k - 1."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Iterate over candidate partition counts until the proportion of perfect partitions falls below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 704
|
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
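    # With the edges above, the shortest 1 -> 4 distance is 11 (1 -> 3 -> 4: 5 + 6) and the
    # shortest 0 -> 3 distance is 16 (0 -> 2 -> 3: 9 + 7); printing the return values makes
    # that visible:
    print(graph.show_min(1, 4))  # 11
    print(graph.show_min(0, 3))  # 16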
| 279
| 0
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    day_anchor = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A century year is a leap year only when it is divisible by 400.
    dooms_day = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    # The target weekday is offset from the month's doomsday date by (day - dooms_day),
    # starting from the weekday the year's doomsday falls on (day_anchor).
    week_day = (day - dooms_day + day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
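# Worked example: for 2000 the century anchor is (5 * (20 % 4) + 2) % 7 = 2, so the year's
# doomsday falls on a Tuesday; March 14 is itself a doomsday date in a leap year, hence
# get_week_day(2000, 3, 14) returns 'Tuesday'.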
| 20
|
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def A ( __UpperCamelCase ) -> Union[str, Any]:
if hor == 128:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
A__ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
A__ = (32, 64, 128, 256)
A__ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
A__ = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
A__ = model.state_dict()
A__ = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
A__ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
A__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
def A ( ) -> List[str]:
A__ = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
A__ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
A__ = model
A__ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
A__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
A__ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
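# After a successful run, the files written by the calls above are:
#   hub/hopper-medium-v2/unet/hor32/diffusion_pytorch_model.bin and config.json
#   hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin and config.json
# (the hor128 variant is produced only if the commented-out unet(128) call is enabled).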
| 9
| 0
|
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Dict = is_training
UpperCAmelCase__ : List[Any] = use_input_mask
UpperCAmelCase__ : List[Any] = use_token_type_ids
UpperCAmelCase__ : List[Any] = use_labels
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : Tuple = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : Optional[Any] = intermediate_multiple_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout
UpperCAmelCase__ : str = attention_dropout
UpperCAmelCase__ : List[Any] = weight_tying
UpperCAmelCase__ : Any = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = num_labels
UpperCAmelCase__ : Optional[int] = num_choices
UpperCAmelCase__ : str = scope
def lowerCamelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : str = None
if self.use_input_mask:
UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCamelCase ( self ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowerCamelCase ( self ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ : Any = True
return config, input_ids, input_mask, token_labels
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : Optional[Any] = GPTNeoXJapaneseModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
UpperCAmelCase__ : Union[str, Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Optional[int] = GPTNeoXJapaneseModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : Dict = GPTNeoXJapaneseForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Union[str, Any] = GPTNeoXJapaneseForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# first forward pass
UpperCAmelCase__ : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
UpperCAmelCase__ : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase__ : Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
UpperCAmelCase__ : Any = output_from_no_past['''hidden_states'''][0]
UpperCAmelCase__ : str = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['''hidden_states'''][0]
# select random slice
UpperCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase__ : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) )
def lowerCamelCase ( self ):
UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = config_and_inputs
UpperCAmelCase__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : List[Any] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Tuple = (
{"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
def lowerCamelCase ( self ):
UpperCAmelCase__ : List[str] = GPTNeoXJapaneseModelTester(self )
UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase ( self ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase ( self ):
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase__ : Optional[int] = None
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase ( self ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase ( self ):
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : Tuple = '''abeja/gpt-neox-japanese-2.7b'''
UpperCAmelCase__ : int = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
UpperCAmelCase__ : Union[str, Any] = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
UpperCAmelCase__ : Any = GPTNeoXJapaneseTokenizer.from_pretrained(_UpperCAmelCase )
UpperCAmelCase__ : List[str] = GPTNeoXJapaneseForCausalLM.from_pretrained(_UpperCAmelCase )
UpperCAmelCase__ : int = []
for prompt in prompts:
UpperCAmelCase__ : Optional[Any] = tokenizer(_UpperCAmelCase , return_tensors='''pt''' ).input_ids
UpperCAmelCase__ : Dict = model.generate(_UpperCAmelCase , max_length=50 )
UpperCAmelCase__ : Tuple = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
| 708
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=224 , _UpperCAmelCase=1000 , _UpperCAmelCase=[3, 3, 6, 4] , _UpperCAmelCase=[48, 56, 112, 220] , ):
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : Tuple = is_training
UpperCAmelCase__ : Union[str, Any] = use_labels
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : int = layer_depths
UpperCAmelCase__ : List[str] = embed_dims
def lowerCamelCase ( self ):
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_UpperCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : List[str] = SwiftFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : List[str] = self.num_labels
UpperCAmelCase__ : str = SwiftFormerForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
UpperCAmelCase__ : Optional[Any] = SwiftFormerForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[int] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self ):
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Union[str, Any] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : List[Any] = False
def lowerCamelCase ( self ):
UpperCAmelCase__ : Tuple = SwiftFormerModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(
self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCamelCase ( self ):
pass
def lowerCamelCase ( self ):
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_UpperCAmelCase )
UpperCAmelCase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def lowerCamelCase ( self ):
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class(_UpperCAmelCase )
UpperCAmelCase__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowerCamelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCamelCase ( self ):
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[Any] = SwiftFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCamelCase ( self ):
pass
def lowerCamelCase ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase__ : Optional[int] = outputs.hidden_states
UpperCAmelCase__ : Optional[Any] = 8
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_UpperCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : int = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase ( self ):
def _config_zero_init(_UpperCAmelCase ):
UpperCAmelCase__ : str = copy.deepcopy(_UpperCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_UpperCAmelCase , _UpperCAmelCase , 1E-10 )
if isinstance(getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = _config_zero_init(getattr(_UpperCAmelCase , _UpperCAmelCase ) )
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return configs_no_init
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[Any] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase ( self ):
pass
def lowerCAmelCase__ ( ) -> Optional[int]:
UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase ( self ):
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : int = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_UpperCAmelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : str = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase__ : List[str] = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 599
| 0