| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    '''simple docstring'''

    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 447
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 447
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}


class FunnelTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
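A hedged usage sketch for the fast tokenizer above; note the leading <cls> token receives token_type_id 2 (cls_token_type_id).

from transformers import FunnelTokenizerFast

tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
encoding = tokenizer("Hello world", "a second sequence")
print(encoding["input_ids"])
print(encoding["token_type_ids"])  # starts with 2 for <cls>, then 0s / 1s per segment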
| 610
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "yolos"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
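A minimal sketch of instantiating the configuration above and a randomly initialized model from it.

from transformers import YolosConfig, YolosModel

config = YolosConfig(num_detection_tokens=100)
model = YolosModel(config)  # random weights; use from_pretrained(...) for trained ones
print(config.image_size, config.num_detection_tokens)  # [512, 864] 100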
| 610
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 24
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
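A quick sanity check of the recursive bubble sort above.

print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]
print(bubble_sort([]))               # [] (no swaps, returns immediately)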
| 478
| 0
|
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
import bs4
from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
def __init__( self , **kwargs ):
requires_backends(self , ["bs4"] )
super().__init__(**kwargs )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : Optional[int] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__UpperCAmelCase : Tuple = parent.find_all(child.name , recursive=UpperCamelCase_ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(UpperCamelCase_ ) else next(i for i, s in enumerate(UpperCamelCase_ , 1 ) if s is child ) )
__UpperCAmelCase : Tuple = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Any = BeautifulSoup(UpperCamelCase_ , "html.parser" )
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Any = []
for element in html_code.descendants:
if type(element ) == bs4.element.NavigableString:
if type(element.parent ) != bs4.element.Tag:
continue
text_in_this_tag = html.unescape(element ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(UpperCamelCase_ )
__UpperCAmelCase : int = self.xpath_soup(UpperCamelCase_ )
stringaxtag_seq.append(UpperCamelCase_ )
stringaxsubs_seq.append(UpperCamelCase_ )
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : List[Any] = ""
for tagname, subs in zip(UpperCamelCase_ , UpperCamelCase_ ):
xpath += f"""/{tagname}"""
if subs != 0:
xpath += f"""[{subs}]"""
return xpath
def __call__( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = False
# Check that strings has a valid type
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : List[Any] = True
elif isinstance(UpperCamelCase_ , (list, tuple) ):
if len(UpperCamelCase_ ) == 0 or isinstance(html_strings[0] , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
f"""but is of type {type(UpperCamelCase_ )}.""" )
__UpperCAmelCase : Any = bool(isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(html_strings[0] , UpperCamelCase_ )) )
if not is_batched:
__UpperCAmelCase : Any = [html_strings]
# Get nodes + xpaths
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Optional[int] = []
for html_string in html_strings:
__UpperCAmelCase : Union[str, Any] = self.get_three_from_single(UpperCamelCase_ )
nodes.append(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = []
for node, tag_list, sub_list in zip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = self.construct_xpath(UpperCamelCase_ , UpperCamelCase_ )
xpath_strings.append(UpperCamelCase_ )
xpaths.append(UpperCamelCase_ )
# return as Dict
__UpperCAmelCase : int = {"nodes": nodes, "xpaths": xpaths}
__UpperCAmelCase : Tuple = BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
return encoded_inputs
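A hedged usage sketch for the feature extractor above (requires `bs4`); the commented outputs are what the xpath construction yields for this input.

from transformers import MarkupLMFeatureExtractor

html_string = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html_string)
print(encoding["nodes"])   # [['Title', 'Hello world']]
print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]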
| 715
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
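A short illustration (hedged) of what the `_LazyModule` registration above buys: importing the package is cheap, and the torch-backed symbols are only resolved on first attribute access.

import transformers.models.reformer as reformer

config_cls = reformer.ReformerConfig  # resolved through _import_structure
model_cls = reformer.ReformerModel    # the heavy torch import happens only here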
| 10
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 218
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    '''simple docstring'''

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        """simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
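A minimal sketch using the restored field names above; `copy()` returns an independent deep copy, so mutating one config does not affect the other.

base_config = DownloadConfig(max_retries=3, resume_download=True)
per_file_config = base_config.copy()
per_file_config.force_download = True
print(base_config.force_download, per_file_config.force_download)  # False True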
| 647
| 0
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = µA(x) * µB(x)
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, (µA(x) - µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 151
|
from copy import deepcopy
class FenwickTree:
    """simple docstring"""

    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int):
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
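A quick demonstration of the Fenwick tree above: point updates plus logarithmic prefix and range sums.

f = FenwickTree([1, 2, 3, 4, 5])
print(f.prefix(3))      # 6, i.e. 1 + 2 + 3
print(f.query(1, 4))    # 9, i.e. 2 + 3 + 4
f.add(2, 10)            # array becomes [1, 2, 13, 4, 5]
print(f.query(1, 4))    # 19
print(f.rank_query(6))  # 1, the last index whose prefix sum stays <= 6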
| 151
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""microsoft/speecht5_asr""": 1024,
"""microsoft/speecht5_tts""": 1024,
"""microsoft/speecht5_vc""": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
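A hedged usage sketch for the character-level tokenizer above; the checkpoint name comes from the pretrained map in this file.

from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
ids = tokenizer("Hello, world!").input_ids  # character-level pieces plus a trailing </s>
print(tokenizer.decode(ids))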
| 461
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 461
| 1
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
config_cls = LEDConfig
config_updates = {}
hidden_act = "gelu"
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , _a=4 , ) -> Tuple:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
lowerCAmelCase_ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
lowerCAmelCase_ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
lowerCAmelCase_ = prepare_led_inputs_dict(_a , _a , _a )
lowerCAmelCase_ = tf.concat(
[tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]] , axis=-1 , )
lowerCAmelCase_ = global_attention_mask
return config, inputs_dict
def __a ( self , _a , _a ) -> Tuple:
lowerCAmelCase_ = TFLEDModel(config=_a ).get_decoder()
lowerCAmelCase_ = inputs_dict["input_ids"]
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict["attention_mask"][:1, :]
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(_a , attention_mask=_a , use_cache=_a )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
lowerCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase_ = model(_a , attention_mask=_a )[0]
lowerCAmelCase_ = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1E-3 )
def prepare_led_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
pipeline_model_mapping = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
is_encoder_decoder = True
test_pruning = False
test_head_masking = False
test_onnx = False
def __a ( self ) -> List[str]:
lowerCAmelCase_ = TFLEDModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_a )
def __a ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
def __a ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = tf.zeros_like(inputs_dict["attention_mask"] )
lowerCAmelCase_ = 2
lowerCAmelCase_ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
lowerCAmelCase_ = True
lowerCAmelCase_ = self.model_tester.seq_length
lowerCAmelCase_ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_a ):
lowerCAmelCase_ = outputs.decoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_a ):
lowerCAmelCase_ = [t.numpy() for t in outputs.encoder_attentions]
lowerCAmelCase_ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = model_class(_a )
lowerCAmelCase_ = model(self._prepare_for_class(_a , _a ) )
lowerCAmelCase_ = len(_a )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
if self.is_encoder_decoder:
lowerCAmelCase_ = model_class(_a )
lowerCAmelCase_ = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_decoder_attentions_output(_a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(_a )
lowerCAmelCase_ = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
# Check attention is always last and order is fine
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(_a )
lowerCAmelCase_ = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) )
self.assertEqual(model.config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __a ( self ) -> Dict:
pass
def __a ( self ) -> Union[str, Any]:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst ):
return tf.constant(tok_lst , dtype=tf.int64 )
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase ):
def __a ( self ) -> Any:
lowerCAmelCase_ = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
lowerCAmelCase_ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCAmelCase_ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCAmelCase_ = prepare_led_inputs_dict(model.config , _a , _a )
lowerCAmelCase_ = model(**_a )[0]
lowerCAmelCase_ = (1, 1024, 768)
self.assertEqual(output.shape , _a )
# change to expected output here
lowerCAmelCase_ = tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1E-3 )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
lowerCAmelCase_ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCAmelCase_ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
lowerCAmelCase_ = prepare_led_inputs_dict(model.config , _a , _a )
lowerCAmelCase_ = model(**_a )[0]
lowerCAmelCase_ = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , _a )
# change to expected output here
lowerCAmelCase_ = tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1E-3 , rtol=1E-3 )
| 226
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = '''RegNetConfig'''
# Base docstring
lowerCamelCase__ = '''facebook/regnet-y-040'''
lowerCamelCase__ = [1, 10_88, 7, 7]
# Image classification docstring
lowerCamelCase__ = '''facebook/regnet-y-040'''
lowerCamelCase__ = '''tabby, tabby cat'''
lowerCamelCase__ = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ (nn.Module ):
def __init__( self , _a , _a , _a = 3 , _a = 1 , _a = 1 , _a = "relu" , ) -> int:
super().__init__()
lowerCAmelCase_ = nn.Conv2d(
_a , _a , kernel_size=_a , stride=_a , padding=kernel_size // 2 , groups=_a , bias=_a , )
lowerCAmelCase_ = nn.BatchNorm2d(_a )
lowerCAmelCase_ = ACT2FN[activation] if activation is not None else nn.Identity()
def __a ( self , _a ) -> str:
lowerCAmelCase_ = self.convolution(_a )
lowerCAmelCase_ = self.normalization(_a )
lowerCAmelCase_ = self.activation(_a )
return hidden_state
class __magic_name__ (nn.Module ):
def __init__( self , _a ) -> Dict:
super().__init__()
lowerCAmelCase_ = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowerCAmelCase_ = config.num_channels
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
lowerCAmelCase_ = self.embedder(_a )
return hidden_state
class __magic_name__ (nn.Module ):
def __init__( self , _a , _a , _a = 2 ) -> int:
super().__init__()
lowerCAmelCase_ = nn.Conv2d(_a , _a , kernel_size=1 , stride=_a , bias=_a )
lowerCAmelCase_ = nn.BatchNorm2d(_a )
def __a ( self , _a ) -> Tensor:
lowerCAmelCase_ = self.convolution(_a )
lowerCAmelCase_ = self.normalization(_a )
return hidden_state
class __magic_name__ (nn.Module ):
def __init__( self , _a , _a ) -> str:
super().__init__()
lowerCAmelCase_ = nn.AdaptiveAvgPool2d((1, 1) )
lowerCAmelCase_ = nn.Sequential(
nn.Conv2d(_a , _a , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(_a , _a , kernel_size=1 ) , nn.Sigmoid() , )
def __a ( self , _a ) -> int:
# b c h w -> b c 1 1
lowerCAmelCase_ = self.pooler(_a )
lowerCAmelCase_ = self.attention(_a )
lowerCAmelCase_ = hidden_state * attention
return hidden_state
class __magic_name__ (nn.Module ):
def __init__( self , _a , _a , _a , _a = 1 ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase_ = in_channels != out_channels or stride != 1
lowerCAmelCase_ = max(1 , out_channels // config.groups_width )
lowerCAmelCase_ = (
RegNetShortCut(_a , _a , stride=_a ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase_ = nn.Sequential(
RegNetConvLayer(_a , _a , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_a , _a , stride=_a , groups=_a , activation=config.hidden_act ) , RegNetConvLayer(_a , _a , kernel_size=1 , activation=_a ) , )
lowerCAmelCase_ = ACT2FN[config.hidden_act]
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = hidden_state
lowerCAmelCase_ = self.layer(_a )
lowerCAmelCase_ = self.shortcut(_a )
hidden_state += residual
lowerCAmelCase_ = self.activation(_a )
return hidden_state
class __magic_name__ (nn.Module ):
def __init__( self , _a , _a , _a , _a = 1 ) -> int:
super().__init__()
lowerCAmelCase_ = in_channels != out_channels or stride != 1
lowerCAmelCase_ = max(1 , out_channels // config.groups_width )
lowerCAmelCase_ = (
RegNetShortCut(_a , _a , stride=_a ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase_ = nn.Sequential(
RegNetConvLayer(_a , _a , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_a , _a , stride=_a , groups=_a , activation=config.hidden_act ) , RegNetSELayer(_a , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_a , _a , kernel_size=1 , activation=_a ) , )
lowerCAmelCase_ = ACT2FN[config.hidden_act]
def __a ( self , _a ) -> Union[str, Any]:
lowerCAmelCase_ = hidden_state
lowerCAmelCase_ = self.layer(_a )
lowerCAmelCase_ = self.shortcut(_a )
hidden_state += residual
lowerCAmelCase_ = self.activation(_a )
return hidden_state
class __magic_name__ (nn.Module ):
def __init__( self , _a , _a , _a , _a = 2 , _a = 2 , ) -> Any:
super().__init__()
lowerCAmelCase_ = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
lowerCAmelCase_ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_a , _a , _a , stride=_a , ) , *[layer(_a , _a , _a ) for _ in range(depth - 1 )] , )
def __a ( self , _a ) -> Tuple:
lowerCAmelCase_ = self.layers(_a )
return hidden_state
class __magic_name__ (nn.Module ):
def __init__( self , _a ) -> Optional[int]:
super().__init__()
lowerCAmelCase_ = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_a , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowerCAmelCase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_a , config.depths[1:] ):
self.stages.append(RegNetStage(_a , _a , _a , depth=_a ) )
def __a ( self , _a , _a = False , _a = True ) -> BaseModelOutputWithNoAttention:
lowerCAmelCase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase_ = hidden_states + (hidden_state,)
lowerCAmelCase_ = stage_module(_a )
if output_hidden_states:
lowerCAmelCase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_a , hidden_states=_a )
class __magic_name__ (__lowercase ):
lowerCamelCase__ = RegNetConfig
lowerCamelCase__ = '''regnet'''
lowerCamelCase__ = '''pixel_values'''
lowerCamelCase__ = True
def __a ( self , _a ) -> str:
if isinstance(_a , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(_a , (nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __a ( self , _a , _a=False ) -> str:
if isinstance(_a , _a ):
lowerCAmelCase_ = value
lowerCamelCase__ = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowerCamelCase__ = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , __lowercase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ (__lowercase ):
def __init__( self , _a ) -> int:
super().__init__(_a )
lowerCAmelCase_ = config
lowerCAmelCase_ = RegNetEmbeddings(_a )
lowerCAmelCase_ = RegNetEncoder(_a )
lowerCAmelCase_ = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __a ( self , _a , _a = None , _a = None ) -> BaseModelOutputWithPoolingAndNoAttention:
lowerCAmelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ = self.embedder(_a )
lowerCAmelCase_ = self.encoder(
_a , output_hidden_states=_a , return_dict=_a )
lowerCAmelCase_ = encoder_outputs[0]
lowerCAmelCase_ = self.pooler(_a )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a , pooler_output=_a , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __lowercase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ (__lowercase ):
def __init__( self , _a ) -> List[str]:
super().__init__(_a )
lowerCAmelCase_ = config.num_labels
lowerCAmelCase_ = RegNetModel(_a )
# classification head
lowerCAmelCase_ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __a ( self , _a = None , _a = None , _a = None , _a = None , ) -> ImageClassifierOutputWithNoAttention:
lowerCAmelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ = self.regnet(_a , output_hidden_states=_a , return_dict=_a )
lowerCAmelCase_ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase_ = self.classifier(_a )
lowerCAmelCase_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase_ = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase_ = "single_label_classification"
else:
lowerCAmelCase_ = "multi_label_classification"
if self.config.problem_type == "regression":
lowerCAmelCase_ = MSELoss()
if self.num_labels == 1:
lowerCAmelCase_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase_ = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase_ = CrossEntropyLoss()
lowerCAmelCase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase_ = BCEWithLogitsLoss()
lowerCAmelCase_ = loss_fct(_a , _a )
if not return_dict:
lowerCAmelCase_ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a , logits=_a , hidden_states=outputs.hidden_states )
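A hedged inference sketch for the classification model above; the checkpoint is the one named in this file's docstring constants, and the image path is a placeholder.

import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

image = Image.open("path/to/image.jpg")  # placeholder path
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])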
| 226
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
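A minimal sketch of consuming the config above; the timm model id is an assumption, and `timm` must be installed.

from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
backbone = TimmBackbone(config)  # wraps the timm model named in `backbone`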
| 277
|
"""simple docstring"""
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """simple docstring"""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """simple docstring"""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
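# Sanity check (a minimal sketch, not part of the original script): the first
# Fibonacci number with 3 digits is F(12) = 144, so solution(3) should be 12.
#
#   assert solution(3) == 12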
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "umt5"
lowerCAmelCase__ : Tuple = ["past_key_values"]
def __init__( self : str , _UpperCAmelCase : int=25_01_12 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : List[str]=64 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=1e-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : str="gated-gelu" , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple="T5Tokenizer" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = self.feed_forward_proj.split('-' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == 'gated'
if len(_UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__lowercase = 'gelu_new'
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.d_model
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.num_layers
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
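# Round-trip check (a hedged sketch using the standard from_pretrained API, not
# part of the original script):
#
#   reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#   assert reloaded.num_parameters() == tiny_model.num_parameters()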
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - import torch at initialization

    def _consolidate(self, column):
        import torch

        # stack a list of same-shape, same-dtype tensors into one tensor
        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
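# Illustrative usage (a hedged sketch, not part of the original module): this
# formatter is normally selected through the public datasets API rather than
# instantiated directly.
#
#   import datasets
#   ds = datasets.Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
#   ds[0]["x"]  # -> tensor([1, 2])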
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    """Nearest-neighbor 2x upsampling followed by a 3x3 convolution."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    """Strided 3x3 convolution that halves the spatial resolution."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    """ResNet block with GroupNorm, swish activations, and a timestep-embedding projection."""

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
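# Minimal shape check (a hedged sketch; the toy sizes are assumptions, not from
# the original file):
#
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   params = block.init(jax.random.PRNGKey(0), jnp.ones((1, 8, 8, 32)), jnp.ones((1, 128)))
#   out = block.apply(params, jnp.ones((1, 8, 8, 32)), jnp.ones((1, 128)))
#   assert out.shape == (1, 8, 8, 64)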
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"


def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)  # fixed: the class is torch.Tensor, not torch.tensor
        r[k] = v
    return r
class __lowerCAmelCase :
"""simple docstring"""
A__ : Union[str, Any] = {}
def __init__( self : Union[str, Any] , _snake_case : dict , _snake_case : str = "root" , _snake_case : List[Any]=0 ):
__lowercase : Tuple = name
__lowercase : Tuple = level
__lowercase : Tuple = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__lowercase : Dict = copy.deepcopy(_snake_case )
__lowercase : Any = copy.deepcopy(_snake_case )
if isinstance(_snake_case , _snake_case ):
__lowercase : List[str] = Config(_snake_case , name=_snake_case , level=level + 1 )
__lowercase : Optional[Any] = v
setattr(self , _snake_case , _snake_case )
__lowercase : Optional[Any] = d
def __repr__( self : str ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Optional[Any] , _snake_case : Optional[int] , _snake_case : Union[str, Any] ):
__lowercase : Dict = val
__lowercase : Dict = val
__lowercase : Tuple = key.split('''.''' )
__lowercase : str = len(_snake_case ) - 1
__lowercase : List[Any] = self._pointer
if len(_snake_case ) > 1:
for i, l in enumerate(_snake_case ):
if hasattr(self , _snake_case ) and isinstance(getattr(self , _snake_case ) , _snake_case ):
setattr(getattr(self , _snake_case ) , '''.'''.join(levels[i:] ) , _snake_case )
if l == last_level:
__lowercase : List[str] = val
else:
__lowercase : Optional[int] = pointer[l]
def snake_case_ ( self : Union[str, Any] ):
return self._pointer
def snake_case_ ( self : Optional[Any] , _snake_case : Optional[int] , _snake_case : Union[str, Any] ):
with open(F'{file_name}' , '''w''' ) as stream:
dump(_snake_case , _snake_case )
def snake_case_ ( self : Dict , _snake_case : int , _snake_case : Dict ):
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(_snake_case , _snake_case )
@staticmethod
def snake_case_ ( _snake_case : int ):
with open(_snake_case ) as stream:
__lowercase : Tuple = load(_snake_case , Loader=_snake_case )
return data
def __str__( self : Optional[int] ):
__lowercase : int = ''' '''
if self._name != "root":
__lowercase : Dict = F'{t * (self._level-1)}{self._name}:\n'
else:
__lowercase : Optional[int] = ''''''
__lowercase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_snake_case , _snake_case ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(_snake_case ).__name__})\n'
__lowercase : Dict = level
return r[:-1]
@classmethod
def snake_case_ ( cls : Dict , _snake_case : str , **_snake_case : Union[str, Any] ):
__lowercase , __lowercase : Union[str, Any] = cls.get_config_dict(_snake_case , **_snake_case )
return cls(_snake_case )
@classmethod
def snake_case_ ( cls : Union[str, Any] , _snake_case : str , **_snake_case : int ):
__lowercase : Dict = kwargs.pop('''cache_dir''' , _snake_case )
__lowercase : str = kwargs.pop('''force_download''' , _snake_case )
__lowercase : int = kwargs.pop('''resume_download''' , _snake_case )
__lowercase : Any = kwargs.pop('''proxies''' , _snake_case )
__lowercase : Dict = kwargs.pop('''local_files_only''' , _snake_case )
if os.path.isdir(_snake_case ):
__lowercase : List[Any] = os.path.join(_snake_case , _snake_case )
elif os.path.isfile(_snake_case ) or is_remote_url(_snake_case ):
__lowercase : Optional[int] = pretrained_model_name_or_path
else:
__lowercase : Tuple = hf_bucket_url(_snake_case , filename=_snake_case , use_cdn=_snake_case )
try:
# Load from URL or cache if already cached
__lowercase : List[str] = cached_path(
_snake_case , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , local_files_only=_snake_case , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__lowercase : int = Config.load_yaml(_snake_case )
except EnvironmentError:
__lowercase : Union[str, Any] = '''Can\'t load config for'''
raise EnvironmentError(_snake_case )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(_snake_case ), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    # the original script signals success by raising, so callers notice the check ran
    raise Exception("tensors are all good")
# Hugging face functions below
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Optional[Any]:
__lowercase : Dict = urlparse(__lowerCAmelCase )
return parsed.scheme in ("http", "https")
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True ) -> str:
__lowercase : List[Any] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__lowercase : List[Any] = '''/''' not in model_id
if legacy_format:
return F'{endpoint}/{model_id}-{filename}'
else:
return F'{endpoint}/{model_id}/{filename}'
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=10 , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , ) -> List[Any]:
if cache_dir is None:
__lowercase : Dict = TRANSFORMERS_CACHE
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__lowercase : Tuple = str(__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
__lowercase : int = None
if not local_files_only:
try:
__lowercase : Optional[int] = requests.head(__lowerCAmelCase , allow_redirects=__lowerCAmelCase , proxies=__lowerCAmelCase , timeout=__lowerCAmelCase )
if response.status_code == 200:
__lowercase : Union[str, Any] = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__lowercase : Any = url_to_filename(__lowerCAmelCase , __lowerCAmelCase )
# get cache path to put the file
__lowercase : Dict = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCAmelCase ):
return cache_path
else:
__lowercase : str = [
file
for file in fnmatch.filter(os.listdir(__lowerCAmelCase ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(__lowerCAmelCase ) > 0:
return os.path.join(__lowerCAmelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCAmelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__lowercase : List[Any] = cache_path + '''.lock'''
with FileLock(__lowerCAmelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCAmelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__lowercase : str = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(__lowerCAmelCase , '''a+b''' ) as f:
yield f
__lowercase : List[Any] = _resumable_file_manager
if os.path.exists(__lowerCAmelCase ):
__lowercase : Union[str, Any] = os.stat(__lowerCAmelCase ).st_size
else:
__lowercase : Optional[Any] = 0
else:
__lowercase : List[str] = partial(tempfile.NamedTemporaryFile , dir=__lowerCAmelCase , delete=__lowerCAmelCase )
__lowercase : str = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , __lowerCAmelCase , temp_file.name , )
http_get(
__lowerCAmelCase , __lowerCAmelCase , proxies=__lowerCAmelCase , resume_size=__lowerCAmelCase , user_agent=__lowerCAmelCase , )
os.replace(temp_file.name , __lowerCAmelCase )
__lowercase : List[str] = {'''url''': url, '''etag''': etag}
__lowercase : Tuple = cache_path + '''.json'''
with open(__lowerCAmelCase , '''w''' ) as meta_file:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , ) -> Optional[int]:
if cache_dir is None:
__lowercase : List[str] = TRANSFORMERS_CACHE
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__lowercase : Union[str, Any] = str(__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__lowercase : List[Any] = str(__lowerCAmelCase )
if is_remote_url(__lowerCAmelCase ):
# URL, so get it from the cache (downloading if necessary)
__lowercase : int = get_from_cache(
__lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , proxies=__lowerCAmelCase , resume_download=__lowerCAmelCase , user_agent=__lowerCAmelCase , local_files_only=__lowerCAmelCase , )
elif os.path.exists(__lowerCAmelCase ):
# File, and it exists.
__lowercase : Optional[Any] = url_or_filename
elif urlparse(__lowerCAmelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(__lowerCAmelCase ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(__lowerCAmelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCAmelCase ) and not tarfile.is_tarfile(__lowerCAmelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__lowercase , __lowercase : int = os.path.split(__lowerCAmelCase )
__lowercase : Tuple = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__lowercase : Optional[int] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.isdir(__lowerCAmelCase ) and os.listdir(__lowerCAmelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__lowercase : int = output_path + '''.lock'''
with FileLock(__lowerCAmelCase ):
shutil.rmtree(__lowerCAmelCase , ignore_errors=__lowerCAmelCase )
os.makedirs(__lowerCAmelCase )
if is_zipfile(__lowerCAmelCase ):
with ZipFile(__lowerCAmelCase , '''r''' ) as zip_file:
zip_file.extractall(__lowerCAmelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCAmelCase ):
__lowercase : Any = tarfile.open(__lowerCAmelCase )
tar_file.extractall(__lowerCAmelCase )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(__lowerCAmelCase ) )
return output_path_extracted
return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()  # fixed: the original called requests.json(), which does not exist
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str), im
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
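# Example (a hedged sketch, not part of the original file): iterate over a list
# two items at a time with the generator above.
#
#   for pair in chunk(["a", "b", "c", "d"], batch=2):
#       print(pair)  # ["a", "b"], then ["c", "d"]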
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = '''data2vec-vision'''
def __init__( self : str , _snake_case : str=768 , _snake_case : Tuple=12 , _snake_case : Any=12 , _snake_case : Optional[int]=3072 , _snake_case : Tuple="gelu" , _snake_case : Dict=0.0 , _snake_case : Any=0.0 , _snake_case : Tuple=0.02 , _snake_case : List[Any]=1E-1_2 , _snake_case : int=224 , _snake_case : List[str]=16 , _snake_case : List[str]=3 , _snake_case : Optional[int]=False , _snake_case : str=False , _snake_case : Tuple=False , _snake_case : Tuple=False , _snake_case : Any=0.1 , _snake_case : Any=0.1 , _snake_case : List[Any]=True , _snake_case : List[Any]=[3, 5, 7, 11] , _snake_case : List[Any]=[1, 2, 3, 6] , _snake_case : Tuple=True , _snake_case : str=0.4 , _snake_case : Any=256 , _snake_case : Any=1 , _snake_case : str=False , _snake_case : str=255 , **_snake_case : Dict , ):
super().__init__(**_snake_case )
__lowercase : int = hidden_size
__lowercase : Optional[int] = num_hidden_layers
__lowercase : str = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : Union[str, Any] = hidden_act
__lowercase : Optional[int] = hidden_dropout_prob
__lowercase : Tuple = attention_probs_dropout_prob
__lowercase : Dict = initializer_range
__lowercase : List[str] = layer_norm_eps
__lowercase : str = image_size
__lowercase : List[str] = patch_size
__lowercase : Dict = num_channels
__lowercase : Optional[Any] = use_mask_token
__lowercase : Optional[int] = use_absolute_position_embeddings
__lowercase : Tuple = use_relative_position_bias
__lowercase : Dict = use_shared_relative_position_bias
__lowercase : List[Any] = layer_scale_init_value
__lowercase : Union[str, Any] = drop_path_rate
__lowercase : Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowercase : List[str] = out_indices
__lowercase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowercase : Dict = use_auxiliary_head
__lowercase : str = auxiliary_loss_weight
__lowercase : Union[str, Any] = auxiliary_channels
__lowercase : Dict = auxiliary_num_convs
__lowercase : Dict = auxiliary_concat_input
__lowercase : Dict = semantic_loss_ignore_index
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : int = version.parse('''1.11''' )
@property
def snake_case_ ( self : str ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case_ ( self : Tuple ):
return 1E-4
"""simple docstring"""
from __future__ import annotations
def _lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : list[str] | None = None ) -> list[list[str]]:
'''simple docstring'''
__A : int = word_bank or []
# create a table
__A : int = len(_SCREAMING_SNAKE_CASE ) + 1
__A : list[list[list[str]]] = []
for _ in range(_SCREAMING_SNAKE_CASE ):
table.append([] )
# seed value
__A : Optional[Any] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_SCREAMING_SNAKE_CASE ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_SCREAMING_SNAKE_CASE )] == word:
__A : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(_SCREAMING_SNAKE_CASE )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_SCREAMING_SNAKE_CASE )]:
combination.reverse()
return table[len(_SCREAMING_SNAKE_CASE )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
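# Note (a sketch, not from the original file): the table has len(target) + 1
# cells, but the number of decompositions itself can grow exponentially. A
# target with no valid decomposition yields the empty list:
#
#   assert all_construct("abc", ["x", "y"]) == []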
"""simple docstring"""
lowerCamelCase : int =[0, 2, 4, 6, 8]
lowerCamelCase : List[str] =[1, 3, 5, 7, 9]
def _lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ) -> int:
'''simple docstring'''
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__A : Union[str, Any] = 0
for digit in range(10 ):
__A : Dict = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return result
__A : Union[str, Any] = 0
for digita in range(10 ):
__A : Tuple = digita
if (remainder + digita) % 2 == 0:
__A : Union[str, Any] = ODD_DIGITS
else:
__A : Optional[int] = EVEN_DIGITS
for digita in other_parity_digits:
__A : Union[str, Any] = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
return result
def _lowercase ( _SCREAMING_SNAKE_CASE : int = 9 ) -> int:
'''simple docstring'''
__A : Tuple = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(_SCREAMING_SNAKE_CASE , 0 , [0] * length , _SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(F'{solution() = }')
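# Known value from the Project Euler 145 statement (a sanity sketch, not part of
# the original file): there are 120 reversible numbers below one thousand.
#
#   assert solution(3) == 120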
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
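# This dummy-object pattern lets `import diffusers` and plain attribute access
# succeed when optional backends are missing; any attempt to actually construct
# or load the scheduler raises an informative error via requires_backends.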
def catalan_numbers(upper_limit: int) -> list:
    """Return the Catalan numbers C(0) .. C(upper_limit), computed bottom-up."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j) * C(i - j - 1)), from j = 0 to i - 1
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
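# Quick check (a sketch, not part of the original file): the first Catalan
# numbers are 1, 1, 2, 5, 14, 42.
#
#   assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]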
from __future__ import annotations

from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n, by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements of the list are equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers, each with n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    """Return the first member of the first run of n consecutive integers with n distinct prime factors each."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
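# Known values from the Project Euler 47 statement (a sanity sketch): the first
# two consecutive integers with two distinct prime factors each are
# 14 = 2 x 7 and 15 = 3 x 5.
#
#   assert run(2) == [14, 15]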
def sum_of_digits(n: int) -> int:
    """Iterative digit sum of |n|."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum of |n|."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time all three implementations on a few large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
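# Agreement check (a sketch, not part of the original file): all three
# implementations return the same value, e.g. 2 + 6 + 2 + 1 + 4 + 4 = 19.
#
#   assert sum_of_digits(262144) == sum_of_digits_compact(262144) == 19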
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
A__: int = logging.get_logger(__name__)
A__: List[Any] = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class _a ( __A):
"""simple docstring"""
UpperCamelCase__ = """codegen"""
UpperCamelCase__ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self: Any , __lowerCamelCase: Dict=5_0400 , __lowerCamelCase: str=2048 , __lowerCamelCase: Any=2048 , __lowerCamelCase: Tuple=4096 , __lowerCamelCase: List[str]=28 , __lowerCamelCase: str=16 , __lowerCamelCase: Optional[Any]=64 , __lowerCamelCase: str=None , __lowerCamelCase: List[str]="gelu_new" , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: List[Any]=0.0 , __lowerCamelCase: Optional[int]=1e-5 , __lowerCamelCase: int=0.02 , __lowerCamelCase: Dict=True , __lowerCamelCase: str=5_0256 , __lowerCamelCase: int=5_0256 , __lowerCamelCase: Any=False , **__lowerCamelCase: List[str] , ):
'''simple docstring'''
UpperCamelCase__: List[Any] = vocab_size
UpperCamelCase__: int = n_ctx
UpperCamelCase__: Optional[int] = n_positions
UpperCamelCase__: int = n_embd
UpperCamelCase__: str = n_layer
UpperCamelCase__: Dict = n_head
UpperCamelCase__: List[str] = n_inner
UpperCamelCase__: List[Any] = rotary_dim
UpperCamelCase__: Union[str, Any] = activation_function
UpperCamelCase__: str = resid_pdrop
UpperCamelCase__: Optional[int] = embd_pdrop
UpperCamelCase__: Union[str, Any] = attn_pdrop
UpperCamelCase__: Dict = layer_norm_epsilon
UpperCamelCase__: Union[str, Any] = initializer_range
UpperCamelCase__: List[Any] = use_cache
UpperCamelCase__: Tuple = bos_token_id
UpperCamelCase__: str = eos_token_id
super().__init__(
bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(CodeGenOnnxConfig, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = MvpTokenizer
UpperCamelCase_ = MvpTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = filter_roberta_detectors
def A__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
lowercase : Dict =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowercase : Tuple =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Optional[Any] =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowercase : List[Any] ={'''unk_token''': '''<unk>'''}
lowercase : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase ) )
def A__ ( self : Union[str, Any] , **UpperCAmelCase : Dict ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A__ ( self : List[str] , **UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A__ ( self : Tuple , UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def A__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
def A__ ( self : Any ) -> int:
'''simple docstring'''
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowercase : List[str] =[0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Union[str, Any] =tokenizer(UpperCAmelCase , max_length=len(UpperCAmelCase ) , padding=UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase : Union[str, Any] =batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Test that special tokens are reset
@require_torch
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : Any =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Dict =tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , UpperCAmelCase )
self.assertIn('''attention_mask''' , UpperCAmelCase )
self.assertNotIn('''labels''' , UpperCAmelCase )
self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase )
@require_torch
def A__ ( self : Dict ) -> int:
'''simple docstring'''
lowercase : int =[
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Optional[Any] =tokenizer(text_target=UpperCAmelCase , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def A__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Union[str, Any] =tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def A__ ( self : str ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =['''A long paragraph for summarization.''']
lowercase : List[Any] =[
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : List[str] =tokenizer(UpperCAmelCase , text_target=UpperCAmelCase , return_tensors='''pt''' )
lowercase : Optional[int] =inputs['''input_ids''']
lowercase : Optional[Any] =inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def A__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def A__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowercase : Tuple =self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowercase : Optional[Any] ='''A, <mask> AllenNLP sentence.'''
lowercase : int =tokenizer_r.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
lowercase : List[Any] =tokenizer_p.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowercase : Any =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowercase : str =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 94
| 0
|
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # store the fallback key used when a call does not supply one
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 718
|
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Shortest path on a binary grid (1 = walkable) with unit edge weights."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
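# Added for illustration: on a 3x3 grid of walkable cells (1) with the centre
# blocked (0), the shortest path between opposite corners without diagonals
# costs 4. (Commented out so the doctest run above stays the sole entry point.)
# _grid = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
# print(dijkstra(_grid, (0, 0), (2, 2), allow_diagonal=False))
# # -> (4.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)])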
| 478
| 0
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
__a :Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline using any `AutoModelForDepthEstimation`."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
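# Added for illustration -- a hedged usage sketch via the standard `pipeline`
# factory; "Intel/dpt-large" is an assumed hub checkpoint, not part of this file.
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is a tensor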
| 86
|
"""simple docstring"""
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Primality lookup against the precomputed sieve."""
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    """A number with an even digit can never be a circular prime (> 2)."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Return every circular prime below ``limit``."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Project Euler style entry point: count the circular primes."""
    return len(find_circular_primes())
if __name__ == "__main__":
print(F'''{len(find_circular_primes()) = }''')
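# Added worked check: 197 is a circular prime (197, 971 and 719 are all prime),
# so it must appear when searching below 1000.
# assert 197 in find_circular_primes(1000)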
| 238
| 0
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate per-process waiting time under shortest-remaining-time-first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
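# Added worked micro-example: with arrival times [0, 1] and burst times [3, 1],
# SRTF preempts the first process at t=1, giving waiting times [1, 0].
# assert calculate_waitingtime([0, 1], [3, 1], 2) == [1, 0]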
| 291
|
from __future__ import annotations
import time
import numpy as np
claim_vector = [8, 5, 9, 7]
allocated_resources_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """Banker's algorithm deadlock-avoidance safety check."""

    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum each resource column over all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus allocated totals."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
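# Added for illustration: running the safety check on the module-level tables.
# Any truthy keyword passed to `main` also prints the pretty tables first.
# BankersAlgorithm(claim_vector, allocated_resources_table, maximum_claim_table).main(describe=True)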
| 291
| 1
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A : Tuple = logging.get_logger()
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ) -> Union[str, Any]:
'''simple docstring'''
print(F"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
UpperCAmelCase = timm.create_model('''levit_128s''' , pretrained=UpperCamelCase__ )
else:
UpperCAmelCase = timm.create_model('''levit_128''' , pretrained=UpperCamelCase__ )
if hidden_sizes == 192:
UpperCAmelCase = timm.create_model('''levit_192''' , pretrained=UpperCamelCase__ )
if hidden_sizes == 256:
UpperCAmelCase = timm.create_model('''levit_256''' , pretrained=UpperCamelCase__ )
if hidden_sizes == 384:
UpperCAmelCase = timm.create_model('''levit_384''' , pretrained=UpperCamelCase__ )
from_model.eval()
UpperCAmelCase = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
UpperCAmelCase = OrderedDict()
UpperCAmelCase = from_model.state_dict()
UpperCAmelCase = list(from_model.state_dict().keys() )
UpperCAmelCase = list(our_model.state_dict().keys() )
print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for i in range(len(UpperCamelCase__ ) ):
UpperCAmelCase = weights[og_keys[i]]
our_model.load_state_dict(UpperCamelCase__ )
UpperCAmelCase = torch.randn((2, 3, 224, 224) )
UpperCAmelCase = from_model(UpperCamelCase__ )
UpperCAmelCase = our_model(UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one."
UpperCAmelCase = name
print(UpperCamelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
UpperCAmelCase = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F"""Pushed {checkpoint_name}""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = '''imagenet-1k-id2label.json'''
UpperCAmelCase = 1000
UpperCAmelCase = (1, num_labels)
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = num_labels
UpperCAmelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
UpperCAmelCase = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
UpperCAmelCase = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
UpperCAmelCase = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
__A : List[Any] = parser.parse_args()
__A : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
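# Added for illustration -- a typical invocation (the script filename is an
# assumption; the flags match the argparse definition above):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/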
| 130
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
__A : List[str] = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
__A : Tuple = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__A : int = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
__A : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 130
| 1
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
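# Added for illustration -- a typical command line (flag names follow the
# benchmark arguments dataclass and are assumptions here):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128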
| 64
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
'''simple docstring'''
UpperCAmelCase__: List[Any] = PegasusConfig
UpperCAmelCase__: Optional[int] = {}
UpperCAmelCase__: List[str] = '''gelu'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
A__ : Dict = parent
A__ : Dict = batch_size
A__ : Any = seq_length
A__ : Optional[Any] = is_training
A__ : int = use_labels
A__ : Any = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : List[Any] = max_position_embeddings
A__ : Any = eos_token_id
A__ : List[Any] = pad_token_id
A__ : List[Any] = bos_token_id
def __A ( self ):
A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def __A ( self , A__ , A__ ):
A__ : int = TFPegasusModel(config=A__ ).get_decoder()
A__ : List[Any] = inputs_dict["""input_ids"""]
A__ : Any = input_ids[:1, :]
A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
A__ : Optional[int] = inputs_dict["""head_mask"""]
A__ : Any = 1
# first forward pass
A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
A__ , A__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
A__ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict , lowercase_: List[Any] , lowercase_: Dict=None , lowercase_: int=None , lowercase_: List[Any]=None , lowercase_: List[Any]=None , lowercase_: str=None , ) -> int:
if attention_mask is None:
A__ : List[str] = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__: int = True
UpperCAmelCase__: Union[str, Any] = False
UpperCAmelCase__: List[str] = False
def __A ( self ):
A__ : Optional[Any] = TFPegasusModelTester(self )
A__ : Tuple = ConfigTester(self , config_class=A__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCAmelCase__: Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''
@cached_property
def __A ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ):
A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __A ( self , **A__ ):
A__ : str = self.translate_src_text(**A__ )
assert self.expected_text == generated_words
def __A ( self , **A__ ):
A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
A__ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
return generated_words
@slow
def __A ( self ):
self._assert_generated_batch_equal_expected()
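# Added for illustration -- a minimal generation sketch mirroring the
# integration test above. Assumes TensorFlow plus the "google/pegasus-xsum"
# checkpoint; the class is imported above as TFAutoModelForSeqaSeqLM
# (i.e. TFAutoModelForSeq2SeqLM).
# tok = AutoTokenizer.from_pretrained("google/pegasus-xsum")
# model = TFAutoModelForSeqaSeqLM.from_pretrained("google/pegasus-xsum")
# batch = tok(["PG&E scheduled the blackouts ..."], padding=True, return_tensors="tf")
# ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
# print(tok.batch_decode(ids.numpy(), skip_special_tokens=True))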
| 64
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase__ ( __lowerCamelCase ):
"""simple docstring"""
UpperCamelCase__ = ['''image_processor''', '''tokenizer''']
UpperCamelCase__ = '''BlipImageProcessor'''
UpperCamelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : List[str] ,a__ : Dict ,a__ : Any ):
a__ = False
super().__init__(a__ ,a__ )
a__ = self.image_processor
def __call__( self : Tuple ,a__ : ImageInput = None ,a__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,a__ : bool = True ,a__ : Union[bool, str, PaddingStrategy] = False ,a__ : Union[bool, str, TruncationStrategy] = None ,a__ : Optional[int] = None ,a__ : int = 0 ,a__ : Optional[int] = None ,a__ : Optional[bool] = None ,a__ : bool = False ,a__ : bool = False ,a__ : bool = False ,a__ : bool = False ,a__ : bool = False ,a__ : bool = True ,a__ : Optional[Union[str, TensorType]] = None ,**a__ : Optional[Any] ,):
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
a__ = self.tokenizer
a__ = self.tokenizer(
text=a__ ,add_special_tokens=a__ ,padding=a__ ,truncation=a__ ,max_length=a__ ,stride=a__ ,pad_to_multiple_of=a__ ,return_attention_mask=a__ ,return_overflowing_tokens=a__ ,return_special_tokens_mask=a__ ,return_offsets_mapping=a__ ,return_token_type_ids=a__ ,return_length=a__ ,verbose=a__ ,return_tensors=a__ ,**a__ ,)
return text_encoding
# add pixel_values
a__ = self.image_processor(a__ ,return_tensors=a__ )
if text is not None:
a__ = self.tokenizer(
text=a__ ,add_special_tokens=a__ ,padding=a__ ,truncation=a__ ,max_length=a__ ,stride=a__ ,pad_to_multiple_of=a__ ,return_attention_mask=a__ ,return_overflowing_tokens=a__ ,return_special_tokens_mask=a__ ,return_offsets_mapping=a__ ,return_token_type_ids=a__ ,return_length=a__ ,verbose=a__ ,return_tensors=a__ ,**a__ ,)
else:
a__ = None
if text_encoding is not None:
encoding_image_processor.update(a__ )
return encoding_image_processor
def lowerCAmelCase_ ( self : Any ,*a__ : Union[str, Any] ,**a__ : Optional[Any] ):
return self.tokenizer.batch_decode(*a__ ,**a__ )
def lowerCAmelCase_ ( self : Optional[int] ,*a__ : List[Any] ,**a__ : Union[str, Any] ):
return self.tokenizer.decode(*a__ ,**a__ )
@property
def lowerCAmelCase_ ( self : int ):
a__ = self.tokenizer.model_input_names
a__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
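# Added for illustration -- a hedged usage sketch. The class above is the BLIP
# processor (name obfuscated here); "Salesforce/blip-image-captioning-base" is
# an assumed hub checkpoint.
# from transformers import BlipProcessor
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=image, text="a photography of", return_tensors="pt")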
| 331
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase_ : str = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class lowerCamelCase__ ( __lowerCamelCase ):
"""simple docstring"""
UpperCamelCase__ = '''gpt_neox_japanese'''
def __init__( self : Tuple ,a__ : str=3_20_00 ,a__ : Union[str, Any]=25_60 ,a__ : Optional[Any]=32 ,a__ : Tuple=32 ,a__ : str=4 ,a__ : int="gelu" ,a__ : Tuple=1.00 ,a__ : Optional[int]=1_00_00 ,a__ : Union[str, Any]=20_48 ,a__ : List[str]=0.02 ,a__ : Optional[int]=1e-5 ,a__ : Union[str, Any]=True ,a__ : int=3_19_96 ,a__ : List[Any]=3_19_99 ,a__ : str=0.1 ,a__ : Dict=0.0 ,**a__ : List[Any] ,):
super().__init__(bos_token_id=a__ ,eos_token_id=a__ ,**a__ )
a__ = vocab_size
a__ = max_position_embeddings
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_multiple_size
a__ = hidden_act
a__ = rotary_pct
a__ = rotary_emb_base
a__ = initializer_range
a__ = layer_norm_eps
a__ = use_cache
a__ = attention_dropout
a__ = hidden_dropout
| 331
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def lowercase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
__magic_name__ = state_dict.pop(__UpperCamelCase )
__magic_name__ = val
def lowercase ( __UpperCamelCase ) -> Union[str, Any]:
__magic_name__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__magic_name__ = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
__magic_name__ = value
else:
__magic_name__ = value
return new_state_dict
def lowercase ( __UpperCamelCase , __UpperCamelCase=False ) -> Tuple:
__magic_name__ = ''''''
if is_panoptic:
__magic_name__ = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__magic_name__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
__magic_name__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ = in_proj_weight[:256, :]
__magic_name__ = in_proj_bias[:256]
__magic_name__ = in_proj_weight[256:512, :]
__magic_name__ = in_proj_bias[256:512]
__magic_name__ = in_proj_weight[-256:, :]
__magic_name__ = in_proj_bias[-256:]
def lowercase ( ) -> List[Any]:
__magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
__magic_name__ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__magic_name__ = '''resnet101'''
if "dc5" in model_name:
__magic_name__ = True
__magic_name__ = '''panoptic''' in model_name
if is_panoptic:
__magic_name__ = 250
else:
__magic_name__ = 91
__magic_name__ = '''huggingface/label-files'''
__magic_name__ = '''coco-detection-id2label.json'''
__magic_name__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
__magic_name__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
__magic_name__ = idalabel
__magic_name__ = {v: k for k, v in idalabel.items()}
# load image processor
__magic_name__ = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__magic_name__ = ConditionalDetrImageProcessor(format=__UpperCamelCase )
# prepare image
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=__UpperCamelCase , return_tensors='''pt''' )
__magic_name__ = encoding['''pixel_values''']
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
__magic_name__ = torch.hub.load('''DeppMeng/ConditionalDETR''' , __UpperCamelCase , pretrained=__UpperCamelCase ).eval()
__magic_name__ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__magic_name__ = '''conditional_detr.''' + src
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__magic_name__ = rename_backbone_keys(__UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__UpperCamelCase , is_panoptic=__UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__magic_name__ = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
__magic_name__ = state_dict.pop(__UpperCamelCase )
__magic_name__ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__magic_name__ = state_dict.pop(__UpperCamelCase )
__magic_name__ = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
__magic_name__ = state_dict.pop(__UpperCamelCase )
__magic_name__ = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
__magic_name__ = state_dict.pop(__UpperCamelCase )
__magic_name__ = val
# finally, create HuggingFace model and load state dict
__magic_name__ = ConditionalDetrForSegmentation(__UpperCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
model.push_to_hub(repo_id=__UpperCamelCase , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
__magic_name__ = conditional_detr(__UpperCamelCase )
__magic_name__ = model(__UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCamelCase = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
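# Added for illustration -- a typical invocation (the script filename is an
# assumption; the flags match the argparse definition above):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 --pytorch_dump_folder_path ./cdetr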
| 190
|
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowercase ( __UpperCAmelCase ):
_lowerCamelCase = (EulerDiscreteScheduler,)
_lowerCamelCase = 10
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
__magic_name__ = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**UpperCamelCase_ )
return config
def lowerCAmelCase__ ( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma
__magic_name__ = sample.to(UpperCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ = scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(UpperCamelCase_ ) )
__magic_name__ = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1E-3
def lowerCAmelCase__ ( self ):
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config(prediction_type='''v_prediction''' )
__magic_name__ = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma
__magic_name__ = sample.to(UpperCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ = scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(UpperCamelCase_ ) )
__magic_name__ = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def lowerCAmelCase__ ( self ):
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase_ )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__magic_name__ = sample.to(UpperCamelCase_ )
for t in scheduler.timesteps:
__magic_name__ = scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(UpperCamelCase_ ) )
__magic_name__ = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1E-3
def lowerCAmelCase__ ( self ):
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**UpperCamelCase_ , use_karras_sigmas=UpperCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase_ )
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__magic_name__ = sample.to(UpperCamelCase_ )
for t in scheduler.timesteps:
__magic_name__ = scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(UpperCamelCase_ ) )
__magic_name__ = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1E-3
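# Added summary sketch: every test above exercises the same three-step
# denoising contract, independent of device or sigma schedule:
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     model_output = model(model_input, t)
#     sample = scheduler.step(model_output, t, sample).prev_sample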
| 190
| 1
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor"""]
_lowercase = """SamImageProcessor"""
def __init__( self: Dict,A_: Union[str, Any] ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = self.image_processor
__UpperCamelCase = -10
__UpperCamelCase = self.image_processor.size['longest_edge']
def __call__( self: Optional[Any],A_: Optional[int]=None,A_: int=None,A_: str=None,A_: str=None,A_: Optional[Union[str, TensorType]] = None,**A_: Optional[int],):
'''simple docstring'''
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,**A_,)
# pop arguments that are not used in the foward but used nevertheless
__UpperCamelCase = encoding_image_processor['original_sizes']
if hasattr(A_,'numpy' ): # Checks if Torch or TF tensor
__UpperCamelCase = original_sizes.numpy()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = self._check_and_preprocess_points(
input_points=A_,input_labels=A_,input_boxes=A_,)
__UpperCamelCase = self._normalize_and_convert(
A_,A_,input_points=A_,input_labels=A_,input_boxes=A_,return_tensors=A_,)
return encoding_image_processor
def snake_case_ ( self: Tuple,A_: Any,A_: str,A_: Dict=None,A_: Dict=None,A_: int=None,A_: List[Any]="pt",):
'''simple docstring'''
if input_points is not None:
if len(A_ ) != len(A_ ):
__UpperCamelCase = [
self._normalize_coordinates(self.target_size,A_,original_sizes[0] ) for point in input_points
]
else:
__UpperCamelCase = [
self._normalize_coordinates(self.target_size,A_,A_ )
for point, original_size in zip(A_,A_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__UpperCamelCase, __UpperCamelCase = self._pad_points_and_labels(A_,A_ )
__UpperCamelCase = np.array(A_ )
if input_labels is not None:
__UpperCamelCase = np.array(A_ )
if input_boxes is not None:
if len(A_ ) != len(A_ ):
__UpperCamelCase = [
self._normalize_coordinates(self.target_size,A_,original_sizes[0],is_bounding_box=A_ )
for box in input_boxes
]
else:
__UpperCamelCase = [
self._normalize_coordinates(self.target_size,A_,A_,is_bounding_box=A_ )
for box, original_size in zip(A_,A_ )
]
__UpperCamelCase = np.array(A_ )
if input_boxes is not None:
if return_tensors == "pt":
__UpperCamelCase = torch.from_numpy(A_ )
# boxes batch size of 1 by default
__UpperCamelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__UpperCamelCase = tf.convert_to_tensor(A_ )
# boxes batch size of 1 by default
__UpperCamelCase = tf.expand_dims(A_,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__UpperCamelCase = torch.from_numpy(A_ )
# point batch size of 1 by default
__UpperCamelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__UpperCamelCase = tf.convert_to_tensor(A_ )
# point batch size of 1 by default
__UpperCamelCase = tf.expand_dims(A_,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
__UpperCamelCase = torch.from_numpy(A_ )
# point batch size of 1 by default
__UpperCamelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__UpperCamelCase = tf.convert_to_tensor(A_ )
# point batch size of 1 by default
__UpperCamelCase = tf.expand_dims(A_,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def snake_case_ ( self: List[str],A_: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = max([point.shape[0] for point in input_points] )
__UpperCamelCase = []
for i, point in enumerate(A_ ):
if point.shape[0] != expected_nb_points:
__UpperCamelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
__UpperCamelCase = np.append(input_labels[i],[self.point_pad_value] )
processed_input_points.append(A_ )
__UpperCamelCase = processed_input_points
return input_points, input_labels
def snake_case_ ( self: Optional[int],A_: int,A_: np.ndarray,A_: Union[str, Any],A_: int=False ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = original_size
__UpperCamelCase, __UpperCamelCase = self.image_processor._get_preprocess_shape(A_,longest_edge=A_ )
__UpperCamelCase = deepcopy(A_ ).astype(A_ )
if is_bounding_box:
__UpperCamelCase = coords.reshape(-1,2,2 )
__UpperCamelCase = coords[..., 0] * (new_w / old_w)
__UpperCamelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__UpperCamelCase = coords.reshape(-1,4 )
return coords
def snake_case_ ( self: Dict,A_: Optional[Any]=None,A_: List[str]=None,A_: Tuple=None,):
'''simple docstring'''
if input_points is not None:
if hasattr(A_,'numpy' ): # Checks for TF or Torch tensor
__UpperCamelCase = input_points.numpy().tolist()
if not isinstance(A_,A_ ) or not isinstance(input_points[0],A_ ):
raise ValueError('Input points must be a list of lists of floating point coordinates.' )
__UpperCamelCase = [np.array(A_ ) for input_point in input_points]
else:
__UpperCamelCase = None
if input_labels is not None:
if hasattr(A_,'numpy' ):
__UpperCamelCase = input_labels.numpy().tolist()
if not isinstance(A_,A_ ) or not isinstance(input_labels[0],A_ ):
raise ValueError('Input labels must be a list of lists of integers.' )
__UpperCamelCase = [np.array(A_ ) for label in input_labels]
else:
__UpperCamelCase = None
if input_boxes is not None:
if hasattr(A_,'numpy' ):
__UpperCamelCase = input_boxes.numpy().tolist()
if (
not isinstance(A_,A_ )
or not isinstance(input_boxes[0],A_ )
or not isinstance(input_boxes[0][0],A_ )
):
raise ValueError('Input boxes must be a list of lists of lists of floating point coordinates.' )
__UpperCamelCase = [np.array(A_ ).astype(np.floataa ) for box in input_boxes]
else:
__UpperCamelCase = None
return input_points, input_labels, input_boxes
@property
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(A_ ) )
def snake_case_ ( self: int,*A_: int,**A_: Tuple ):
'''simple docstring'''
return self.image_processor.post_process_masks(*A_,**A_ )
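# Hedged usage sketch for the processor above (the checkpoint name and prompt
# shapes follow the public SAM API but are assumptions, not verified here):
#
#     from transformers import SamProcessor
#     processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#     inputs = processor(
#         image,                              # a PIL.Image or numpy array
#         input_points=[[[450.0, 600.0]]],    # one (x, y) point for one image
#         input_labels=[[1]],                 # 1 marks a foreground point
#         return_tensors="pt",
#     )
#     # Points are rescaled to the processor's `target_size` and given an extra
#     # "point batch" dimension, as done in _normalize_and_convert above.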
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Any = "xlnet"
a__ : Dict = ["mems"]
a__ : List[str] = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple:
_A = vocab_size
_A = d_model
_A = n_layer
_A = n_head
if d_model % n_head != 0:
raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
_A = d_model // n_head
_A = ff_activation
_A = d_inner
_A = untie_r
_A = attn_type
_A = initializer_range
_A = layer_norm_eps
_A = dropout
_A = mem_len
_A = reuse_len
_A = bi_data
_A = clamp_len
_A = same_length
_A = summary_type
_A = summary_use_proj
_A = summary_activation
_A = summary_last_dropout
_A = start_n_top
_A = end_n_top
_A = bos_token_id
_A = pad_token_id
_A = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , __lowerCAmelCase , )
_A = kwargs['''use_cache''']
_A = use_mems_eval
_A = use_mems_train
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
@property
def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]:
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
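# Illustration of the consistency check in __init__ above: with the defaults
# d_model = 1024 and n_head = 16, d_head resolves to 1024 // 16 = 64; passing
# an explicit d_head=32 in kwargs would raise the ValueError.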
| 2
| 0
|
'''simple docstring'''
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word (letters and spaces only) with the Baconian cipher."""
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-ciphered string back to plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('decode() accepts only \'A\', \'B\' and spaces')
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
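# Quick round trip through the two functions above:
#
#     secret = encode("hello world")
#     # 'AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB'
#     decode(secret)  # 'hello world'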
| 434
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
"""simple docstring"""
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
@property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
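# Example of the remapping this template performs (the column names "q" and
# "passage" are made up for illustration):
#
#     template = QuestionAnsweringExtractive(question_column="q", context_column="passage")
#     template.column_mapping
#     # {'q': 'question', 'passage': 'context', 'answers': 'answers'}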
| 434
| 1
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Simplified SHA-1 implementation (for study only; use hashlib in practice)."""

    def __init__(self, data):
        self.data = data
        self.h = [0x6745_2301, 0xEFCD_AB89, 0x98BA_DCFE, 0x1032_5476, 0xC3D2_E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFF_FFFF

    def padding(self):
        # Pad the message to a multiple of 64 bytes, appending the bit length.
        padding = b'\x80' + b'\x00' * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack('>Q', 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand a 64-byte block into eighty 32-bit words.
        w = list(struct.unpack('>16L', block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A82_7999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9_EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1B_BCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62_C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFF_FFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFF_FFFF,
                self.h[1] + b & 0xFFFF_FFFF,
                self.h[2] + c & 0xFFFF_FFFF,
                self.h[3] + d & 0xFFFF_FFFF,
                self.h[4] + e & 0xFFFF_FFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b'Test String'
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description='Process some strings or files')
    parser.add_argument(
        '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', )
    parser.add_argument('--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
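# Spot check against the standard SHA-1 test vector:
#
#     SHA1Hash(b"abc").final_hash()      # 'a9993e364706816aba3e25717850c26c9cd0d89d'
#     hashlib.sha1(b"abc").hexdigest()   # same value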
| 23
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'canine'
def __init__(self , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3_072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=16_384 , lowerCamelCase=16 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=0 , lowerCamelCase=0Xe_0_0_0 , lowerCamelCase=0Xe_0_0_1 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase=8 , lowerCamelCase=16_384 , lowerCamelCase=128 , **lowerCamelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = initializer_range
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = layer_norm_eps
# Character config:
_lowerCAmelCase = downsampling_rate
_lowerCAmelCase = upsampling_kernel_size
_lowerCAmelCase = num_hash_functions
_lowerCAmelCase = num_hash_buckets
_lowerCAmelCase = local_transformer_stride
| 156
| 0
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
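# Quick illustration of the filtering regex with made-up paths, assuming the
# script was invoked as `python utils/get_modified_files.py src tests`:
#
#     regex = re.compile(r"^(src|tests).*?\.py$")
#     [x for x in ["src/a.py", "docs/b.md", "tests/c.py"] if regex.match(x)]
#     # -> ['src/a.py', 'tests/c.py']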
| 689
|
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
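# Worked example: 13195 = 5 * 7 * 13 * 29, so the largest prime factor is 29.
#
#     solution(13195)  # 29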
| 689
| 1
|
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a : List[str] = logging.get_logger(__name__)
a : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
a : Dict = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
a : Tuple = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load the vocabulary file and the emoji mapping used by the tokenizer below."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class __UpperCamelCase ( a__ ):
lowerCamelCase : Any =VOCAB_FILES_NAMES
lowerCamelCase : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Any =["""input_ids""", """attention_mask"""]
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__="<|startoftext|>" , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Union[str, Any]:
super().__init__(
unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , do_clean_text=lowerCAmelCase__ , **lowerCAmelCase__ , )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
a : Optional[int] = do_clean_text
a, a, a, a : Tuple = load_vocab_and_emoji(lowerCAmelCase__ , lowerCAmelCase__ )
a : Any = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def __a ( self ) -> Any:
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def __a ( self ) -> int:
return dict(self.raw_vocab , **self.added_tokens_encoder )
def __a ( self , lowerCAmelCase__ ) -> List[Any]:
return self.subword_tokenizer.tokenize(lowerCAmelCase__ , clean=self.do_clean_text )
def __a ( self , lowerCAmelCase__ ) -> List[Any]:
return self.vocab.get(lowerCAmelCase__ , self.vocab.get(self.unk_token ) )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> int:
a : Tuple = "".join(lowerCAmelCase__ ).strip()
return out_string
def __a ( self , lowerCAmelCase__ ) -> List[int]:
a : Tuple = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) + [self.eos_token_id] )
if len(lowerCAmelCase__ ) > self.model_max_length:
a : List[str] = input_ids[-self.model_max_length :]
return input_ids
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
a : List[Any] = 0
if os.path.isdir(lowerCAmelCase__ ):
a : int = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
a : Tuple = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
a : List[Any] = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a : Tuple = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
a : int = token_index
writer.write(",".join(lowerCAmelCase__ ) + "\n" )
index += 1
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , lowerCAmelCase__ )
return vocab_file, emoji_file
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Union[str, Any] = vocab # same as swe
a : str = ids_to_tokens # same as bpe
a : str = emoji
a : Tuple = np.max([len(lowerCAmelCase__ ) for w in self.vocab.keys()] )
a : List[str] = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
a : Optional[int] = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
a : Dict = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
a : Tuple = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
a : str = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
a : Optional[int] = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
a : Optional[Any] = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a : Union[str, Any] = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a : Any = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> Dict:
return len(self.ids_to_tokens )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
a : List[Any] = self.content_repattera.sub("<URL>" , lowerCAmelCase__ )
a : Dict = self.content_repattera.sub("<EMAIL>" , lowerCAmelCase__ )
a : Tuple = self.content_repattera.sub("<TEL>" , lowerCAmelCase__ )
a : Union[str, Any] = self.content_repattera.sub("<DATE>" , lowerCAmelCase__ )
a : Optional[int] = self.content_repattera.sub("<DATE>" , lowerCAmelCase__ )
a : Any = self.content_repattera.sub("<PRICE>" , lowerCAmelCase__ )
a : Optional[int] = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
a : Any = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Optional[int]:
a : Optional[int] = text.replace(" " , "<SP>" )
a : int = text.replace(" " , "<SP>" )
a : Optional[Any] = text.replace("\r\n" , "<BR>" )
a : str = text.replace("\n" , "<BR>" )
a : str = text.replace("\r" , "<BR>" )
a : str = text.replace("\t" , "<TAB>" )
a : Any = text.replace("—" , "ー" )
a : List[str] = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
a : int = text.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if clean:
a : Optional[int] = self.clean_text(lowerCAmelCase__ )
def check_simbol(lowerCAmelCase__ ):
a : List[str] = x.encode()
if len(lowerCAmelCase__ ) == 1 and len(lowerCAmelCase__ ) == 2:
a : Any = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc_2a1 and c <= 0xc_2bf)
or (c >= 0xc_780 and c <= 0xc_783)
or (c >= 0xc_ab9 and c <= 0xc_bbf)
or (c >= 0xc_c80 and c <= 0xc_da2)
):
return True
return False
def checkuae(lowerCAmelCase__ ):
a : str = x.encode()
if len(lowerCAmelCase__ ) == 1 and len(lowerCAmelCase__ ) == 3:
a : Tuple = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe28_080 and c <= 0xe2b_07f:
return True
return False
a : Optional[Any] = 0
a : Tuple = []
while pos < len(lowerCAmelCase__ ):
a : Optional[Any] = min(len(lowerCAmelCase__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
a : Union[str, Any] = [] # (token_id, token, pos)
for e in range(lowerCAmelCase__ , lowerCAmelCase__ , -1 ):
a : List[Any] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase__ ) > 2:
a : Dict = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(lowerCAmelCase__ ) > 0:
# the smallest token_id is adopted
a, a, a : List[Any] = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : x[0] )[0]
result.append(lowerCAmelCase__ )
a : Dict = e
else:
a : Tuple = pos + 1
a : List[str] = text[pos:end]
if check_simbol(lowerCAmelCase__ ):
result.append("<KIGOU>" )
elif checkuae(lowerCAmelCase__ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
a : Tuple = end
return result
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__="\n" ) -> Any:
a : Dict = []
a : List[str] = []
a : List[str] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCAmelCase__ ) > 0:
words.append(bytearray(lowerCAmelCase__ ).decode("utf-8" , errors="replace" ) )
a : List[str] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(lowerCAmelCase__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
words.append(bytearray(lowerCAmelCase__ ).decode("utf-8" , errors="replace" ) )
a : Dict = "".join(lowerCAmelCase__ )
return text
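# Hedged usage sketch for the tokenizer above (requires the real transformers
# package and network access; behaviour assumed from the error messages above):
#
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("こんにちは、世界")["input_ids"]
#     tokenizer.decode(ids)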
| 633
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=33 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> List[str]:
a : List[str] = parent
a : Optional[int] = batch_size
a : Optional[int] = seq_length
a : Tuple = is_training
a : Optional[int] = use_input_mask
a : List[Any] = use_token_type_ids
a : Tuple = use_labels
a : Union[str, Any] = vocab_size
a : str = hidden_size
a : List[Any] = num_hidden_layers
a : Dict = num_attention_heads
a : int = intermediate_size
a : str = hidden_act
a : Optional[int] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : str = max_position_embeddings
a : List[str] = type_vocab_size
a : List[str] = type_sequence_label_size
a : Union[str, Any] = initializer_range
a : int = num_labels
a : Optional[int] = num_choices
a : Union[str, Any] = scope
def __a ( self ) -> Optional[Any]:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : Dict = None
if self.use_input_mask:
a : str = random_attention_mask([self.batch_size, self.seq_length] )
a : Union[str, Any] = None
a : int = None
a : int = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
a : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self ) -> Optional[int]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
a : Union[str, Any] = EsmModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : List[str] = model(lowerCAmelCase__ )
a : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : int = EsmForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
a : str = self.num_labels
a : int = EsmForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : int = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self ) -> Union[str, Any]:
a : str = self.prepare_config_and_inputs()
(
(
a
), (
a
), (
a
), (
a
), (
a
), (
a
),
) : Optional[int] = config_and_inputs
a : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : Optional[Any] =False
lowerCamelCase : Tuple =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase : Tuple =()
lowerCamelCase : Optional[int] =(
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : Any =True
def __a ( self ) -> Tuple:
a : str = EsmModelTester(self )
a : Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def __a ( self ) -> Dict:
self.config_tester.run_common_tests()
def __a ( self ) -> Union[str, Any]:
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a : List[str] = type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __a ( self ) -> Optional[Any]:
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@slow
def __a ( self ) -> Optional[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = EsmModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __a ( self ) -> Optional[int]:
a : List[str] = self.model_tester.prepare_config_and_inputs()[0]
a : str = EsmEmbeddings(config=lowerCAmelCase__ )
a : Optional[Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
a : Tuple = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
a : str = create_position_ids_from_input_ids(lowerCAmelCase__ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowerCAmelCase__ , lowerCAmelCase__ ) ) )
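    # Worked instance of the check above: with padding_idx = 1, real tokens receive
    # consecutive positions starting at padding_idx + 1 while padding positions stay
    # at padding_idx, so input_ids [[12, 31, 13, <pad>]] maps to [[2, 3, 4, 1]].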
def __a ( self ) -> List[Any]:
a : int = self.model_tester.prepare_config_and_inputs()[0]
a : Dict = EsmEmbeddings(config=lowerCAmelCase__ )
a : Any = torch.empty(2 , 4 , 30 )
a : str = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
a : Optional[Any] = torch.as_tensor([expected_single_positions, expected_single_positions] )
a : str = embeddings.create_position_ids_from_inputs_embeds(lowerCAmelCase__ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowerCAmelCase__ , lowerCAmelCase__ ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def __a ( self ) -> Any:
pass
@unittest.skip("Esm does not support embedding resizing" )
def __a ( self ) -> Dict:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __a ( self ) -> str:
pass
@require_torch
class __UpperCamelCase ( a__ ):
@slow
def __a ( self ) -> int:
with torch.no_grad():
a : str = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
a : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
a : Any = model(lowerCAmelCase__ )[0]
a : Union[str, Any] = 33
a : str = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase__ )
a : Optional[int] = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
@slow
def __a ( self ) -> str:
with torch.no_grad():
a : Optional[int] = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
a : str = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
a : Tuple = model(lowerCAmelCase__ )[0]
# compare the actual values for a slice.
a : Union[str, Any] = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 633
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A: List[str] = logging.get_logger(__name__)
def squared_euclidean_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Pairwise squared Euclidean distances between rows of a and rows of b."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    """Assign each pixel of x to the index of its nearest cluster color."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
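# Tiny numeric check of the quantizer above, using a made-up two-color palette:
#
#     pixels = np.array([[[0.9, 0.9, 0.9], [-1.0, -1.0, -1.0]]])   # (1, 2, 3)
#     palette = np.array([[1.0, 1.0, 1.0], [-1.0, -1.0, -1.0]])    # (2, 3)
#     color_quantize(pixels, palette)                              # array([0, 1])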
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ['pixel_values']
def __init__( self , _lowercase = None , _lowercase = True , _lowercase = None , _lowercase = PILImageResampling.BILINEAR , _lowercase = True , _lowercase = True , **_lowercase , ) -> None:
super().__init__(**_lowercase )
lowercase_ : Optional[Any] = size if size is not None else {'height': 256, 'width': 256}
lowercase_ : Optional[Any] = get_size_dict(_lowercase )
lowercase_ : List[str] = np.array(_lowercase ) if clusters is not None else None
lowercase_ : Dict = do_resize
lowercase_ : Dict = size
lowercase_ : Optional[int] = resample
lowercase_ : Dict = do_normalize
lowercase_ : Tuple = do_color_quantize
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase = PILImageResampling.BILINEAR , _lowercase = None , **_lowercase , ) -> np.ndarray:
lowercase_ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}" )
return resize(
_lowercase , size=(size['height'], size['width']) , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase = None , ) -> np.ndarray:
lowercase_ : Optional[Any] = rescale(image=_lowercase , scale=1 / 127.5 , data_format=_lowercase )
lowercase_ : Optional[Any] = image - 1
return image
def lowerCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ) -> PIL.Image.Image:
lowercase_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowercase_ : Optional[Any] = size if size is not None else self.size
lowercase_ : Any = get_size_dict(_lowercase )
lowercase_ : Any = resample if resample is not None else self.resample
lowercase_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
lowercase_ : Union[str, Any] = clusters if clusters is not None else self.clusters
lowercase_ : Dict = np.array(_lowercase )
lowercase_ : str = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
lowercase_ : Tuple = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
lowercase_ : Dict = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_normalize:
lowercase_ : str = [self.normalize(image=_lowercase ) for image in images]
if do_color_quantize:
lowercase_ : Tuple = [to_channel_dimension_format(_lowercase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
lowercase_ : List[Any] = np.array(_lowercase )
lowercase_ : Optional[int] = color_quantize(_lowercase , _lowercase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
lowercase_ : Optional[int] = images.shape[0]
lowercase_ : Optional[Any] = images.reshape(_lowercase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
lowercase_ : List[str] = list(_lowercase )
else:
lowercase_ : List[str] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
lowercase_ : int = {'input_ids': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
| 702
|
'''simple docstring'''
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a circular array."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
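# Small demonstration of the queue above (enqueue returns self, so calls chain):
#
#     q = CircularQueue(3)
#     q.enqueue(1).enqueue(2).enqueue(3)
#     q.dequeue()     # 1
#     q.enqueue(4)    # reuses the slot freed by the dequeue
#     q.first()       # 2
#     len(q)          # 3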
| 7
| 0
|
"""simple docstring"""
import os
import sys
import unittest
__UpperCamelCase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCamelCase : Tuple = os.path.join(git_repo_path, '''src''', '''diffusers''')
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Any = find_backend(''' if not is_torch_available():''' )
self.assertEqual(lowercase_ ,'''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
lowerCAmelCase__ : Optional[Any] = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(lowercase_ ,'''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
lowerCAmelCase__ : List[Any] = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(lowercase_ ,'''torch_and_transformers_and_onnx''' )
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : List[str] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' ,lowercase_ )
self.assertIn('''torch_and_transformers''' ,lowercase_ )
self.assertIn('''flax_and_transformers''' ,lowercase_ )
self.assertIn('''torch_and_transformers_and_onnx''' ,lowercase_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' ,objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' ,objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' ,objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' ,objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' ,objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' ,objects['''torch_and_transformers_and_onnx'''] )
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : List[Any] = create_dummy_object('''CONSTANT''' ,'''\'torch\'''' )
self.assertEqual(lowercase_ ,'''\nCONSTANT = None\n''' )
lowerCAmelCase__ : Optional[Any] = create_dummy_object('''function''' ,'''\'torch\'''' )
self.assertEqual(
lowercase_ ,'''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
lowerCAmelCase__ : Tuple = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
lowerCAmelCase__ : Union[str, Any] = create_dummy_object('''FakeClass''' ,'''\'torch\'''' )
self.assertEqual(lowercase_ ,lowercase_ )
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Any = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
lowerCAmelCase__ : Optional[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] ,lowercase_ )
| 450
|
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__UpperCamelCase : str = 4
__UpperCamelCase : List[str] = 3
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( A_ ):
for shard in shards:
for i in range(A_ ):
yield {"i": i, "shard": shard}
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : List[str] = int(os.environ['''RANK'''] )
lowerCAmelCase__ : Union[str, Any] = int(os.environ['''WORLD_SIZE'''] )
lowerCAmelCase__ : Tuple = ArgumentParser()
parser.add_argument('''--streaming''' , type=A_ )
parser.add_argument('''--local_rank''' , type=A_ )
parser.add_argument('''--num_workers''' , type=A_ , default=0 )
lowerCAmelCase__ : Tuple = parser.parse_args()
lowerCAmelCase__ : Tuple = args.streaming
lowerCAmelCase__ : Any = args.num_workers
lowerCAmelCase__ : Dict = {'''shards''': [f'shard_{shard_idx}' for shard_idx in range(A_ )]}
lowerCAmelCase__ : Optional[int] = IterableDataset.from_generator(A_ , gen_kwargs=A_ )
if not streaming:
lowerCAmelCase__ : Dict = Dataset.from_list(list(A_ ) )
lowerCAmelCase__ : Optional[Any] = split_dataset_by_node(A_ , rank=A_ , world_size=A_ )
lowerCAmelCase__ : List[Any] = torch.utils.data.DataLoader(A_ , num_workers=A_ )
lowerCAmelCase__ : Optional[int] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowerCAmelCase__ : List[str] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
lowerCAmelCase__ : int = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
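# Sketch of the splitting behaviour this test checks, for a single process
# (rank 0 of a world size of 2); `split_dataset_by_node` keeps every node's
# share within one example of full_size // world_size:
#
#     ds = Dataset.from_dict({"i": list(range(10))})
#     shard = split_dataset_by_node(ds, rank=0, world_size=2)
#     len(shard)  # 5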
| 450
| 1
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCAmelCase_ :
lowerCamelCase_ = LEDConfig
lowerCamelCase_ = {}
lowerCamelCase_ = 'gelu'
def __init__( self : Union[str, Any] , __A : Optional[Any] , __A : str=13 , __A : Any=7 , __A : Union[str, Any]=True , __A : Dict=False , __A : Any=99 , __A : str=32 , __A : Union[str, Any]=2 , __A : Optional[Any]=4 , __A : List[str]=37 , __A : Any=0.1 , __A : Dict=0.1 , __A : Any=20 , __A : List[str]=2 , __A : Any=1 , __A : int=0 , __A : Optional[int]=4 , ) ->Optional[int]:
"""simple docstring"""
a__ :Optional[int] = parent
a__ :str = batch_size
a__ :Dict = seq_length
a__ :Optional[int] = is_training
a__ :Tuple = use_labels
a__ :Dict = vocab_size
a__ :Any = hidden_size
a__ :Tuple = num_hidden_layers
a__ :Union[str, Any] = num_attention_heads
a__ :str = intermediate_size
a__ :str = hidden_dropout_prob
a__ :Dict = attention_probs_dropout_prob
a__ :Any = max_position_embeddings
a__ :Optional[int] = eos_token_id
a__ :Optional[int] = pad_token_id
a__ :str = bos_token_id
a__ :Optional[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
a__ :Optional[Any] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
a__ :int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _snake_case ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
a__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a__ :Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a__ :Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
a__ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ :List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
a__ :Any = prepare_led_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
a__ :List[str] = tf.concat(
[tf.zeros_like(_UpperCAmelCase )[:, :-1], tf.ones_like(_UpperCAmelCase )[:, -1:]] , axis=-1 , )
a__ :Dict = global_attention_mask
return config, inputs_dict
def _snake_case ( self : Optional[int] , __A : int , __A : Any ) ->str:
"""simple docstring"""
a__ :Dict = TFLEDModel(config=_UpperCAmelCase ).get_decoder()
a__ :Optional[int] = inputs_dict["input_ids"]
a__ :Optional[Any] = input_ids[:1, :]
a__ :Optional[int] = inputs_dict["attention_mask"][:1, :]
a__ :Tuple = 1
# first forward pass
a__ :List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
a__ , a__ :List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a__ :Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
a__ :List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
a__ :List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
a__ :Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a__ :Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
a__ :Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a__ :Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a__ :int = output_from_no_past[:, -3:, random_slice_idx]
a__ :Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 )
def lowerCamelCase__ ( a : List[Any] , a : List[str] , a : Tuple , a : Union[str, Any]=None , a : Union[str, Any]=None , a : Any=None , a : Union[str, Any]=None , ) -> str:
"""simple docstring"""
if attention_mask is None:
a__ :Optional[int] = tf.cast(tf.math.not_equal(lowerCAmelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
a__ :List[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
a__ :Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a__ :Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowerCAmelCase_ ( _a ,_a ,unittest.TestCase):
lowerCamelCase_ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowerCamelCase_ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowerCamelCase_ = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def _snake_case ( self : str ) ->Optional[Any]:
"""simple docstring"""
a__ :str = TFLEDModelTester(self )
a__ :List[Any] = ConfigTester(self , config_class=_UpperCAmelCase )
def _snake_case ( self : Tuple ) ->str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
a__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _snake_case ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
pass
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 714
|
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the largest-magnitude eigenvalue of `input_matrix` and its eigenvector."""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
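    # Quick illustrative check (not part of the original module; assumes only numpy).
    # Power iteration converges at a rate governed by |lambda_2 / lambda_1|, so on a
    # positive semi-definite matrix it should agree with np.linalg.eigh's top eigenvalue.
    rng = np.random.default_rng(0)
    m = rng.standard_normal((5, 5))
    m = m @ m.T  # symmetric with non-negative spectrum, so the assertions above hold
    v0 = rng.standard_normal(5)
    top_value, _ = power_iteration(m, v0)
    assert np.abs(top_value - np.linalg.eigh(m)[0][-1]) <= 1e-6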
| 373
| 0
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _A ( __lowercase , __lowercase , __lowercase = None ):
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
# old versions of hfh don't url-encode the file path
lowerCamelCase__ = quote(__lowercase )
return hfh.hf_hub_url(__lowercase , __lowercase , repo_type="""dataset""" , revision=__lowercase )
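# Illustrative usage (not from the original module; the repo id and filename are
# made-up placeholders):
#
#   url = hf_hub_url("user/my-dataset", "data/train.csv", revision="main")
#   # -> https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train.csv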
| 129
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
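# Note (illustrative, not part of the module): nothing heavy is imported at module
# load time; `_LazyModule` resolves names on first attribute access, so e.g.
#
#   from transformers import ReformerModel
#
# only then imports `modeling_reformer` (and fails cleanly if torch is unavailable).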
| 129
| 1
|
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list[list] ):
"""simple docstring"""
_snake_case : Optional[int] = current_set.copy()
for row_index, row in enumerate(_lowerCamelCase ):
_snake_case : Any = row[0]
for column_index, column in enumerate(_lowerCamelCase ):
if magnitude == 0:
_snake_case : str = column
continue
_snake_case : List[Any] = column / magnitude
# Subtract to cancel term
_snake_case : List[Any] = current_set[0]
_snake_case : int = [first_row]
_snake_case : Optional[int] = current_set[1::]
for row in current_set:
_snake_case : List[Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(_lowerCamelCase )
continue
for column_index in range(len(_lowerCamelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(_lowerCamelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_snake_case : str = final_set[0]
_snake_case : Any = []
_snake_case : int = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_snake_case : Dict = simplify(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , _lowerCamelCase )
_snake_case : Dict = resultant
return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """Solve n equations in n unknowns; each row holds n coefficients plus the constant."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
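    # Cross-check against numpy (illustrative only; numpy is not a dependency of the
    # module above). np.linalg.solve should return the same solution vector.
    import numpy as np

    coefficients = np.array([row[:-1] for row in eq], dtype=float)
    constants = np.array([row[-1] for row in eq], dtype=float)
    print(np.linalg.solve(coefficients, constants))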
| 705
|
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 28
| 0
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 519
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 41
| 0
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
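# Illustrative sketch of the pattern the script above exercises (not from the original
# file): with uneven batch distribution, `gather_for_metrics` drops the duplicated
# padding samples that a plain `gather` would keep, so a metric sees exactly
# len(dataset) examples:
#
#   for batch in prepared_dataloader:
#       with torch.inference_mode():
#           preds = model(**batch).logits.argmax(dim=-1)
#       preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
#       metric.add_batch(predictions=preds, references=refs)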
| 179
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 179
| 1
|
import json
import os

import torch

from diffusers import UNet1DModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)

os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model  # the checkpoint already is a state dict
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 73
|
"""Sum of all numbers between 1000 and 1000000 that equal the sum of the fifth
powers of their digits."""

DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
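    # Quick sanity check (illustrative, not in the original file): 4150 equals
    # 4**5 + 1**5 + 5**5 + 0**5, so it must be counted by solution().
    assert digits_fifth_powers_sum(4150) == 4150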
| 301
| 0
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: bubble the largest element to the end, then recurse on
    the first `length - 1` items; stop early if a pass makes no swaps."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
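    # Illustrative usage (not part of the original module):
    print(bubble_sort([0, 5, 2, 3, 2]))  # -> [0, 2, 2, 3, 5]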
| 705
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # note: `"audio" and` is always truthy, so this effectively tests `"qkv" in key`
        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 149
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Denoising diffusion probabilistic models (DDPM) scheduler, Flax version."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
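# --- Illustrative sketch, not part of the scheduler above ---
# The coefficients computed in steps 4-5 of step() implement formula (7) of the
# DDPM paper: the posterior mean is a weighted mix of the predicted x_0 and the
# current sample x_t. The scalar values below are assumptions picked only to
# make the arithmetic visible.
import jax.numpy as jnp

alpha_prod_t, alpha_prod_t_prev = 0.5, 0.7   # cumulative alphas at t and t-1
alpha_t = alpha_prod_t / alpha_prod_t_prev   # per-step alpha implied by the cumprods
beta_t = 1.0 - alpha_t
beta_prod_t = 1.0 - alpha_prod_t
coeff_x0 = (alpha_prod_t_prev ** 0.5 * beta_t) / beta_prod_t
coeff_xt = alpha_t ** 0.5 * (1.0 - alpha_prod_t_prev) / beta_prod_t
x0_hat, x_t = jnp.array(0.1), jnp.array(0.3)  # predicted clean sample, noisy sample
mu_t = coeff_x0 * x0_hat + coeff_xt * x_t     # predicted previous sample (mean)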
| 127
|
'''simple docstring'''
import argparse
import hashlib  # hashlib is used only by the self-test below
import struct
class SHAaHash:
    """simple docstring"""

    def __init__(self, data):
        self.data = data
        self.h = [0x6745_2301, 0xEFCD_AB89, 0x98BA_DCFE, 0x1032_5476, 0xC3D2_E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFF_FFFF

    def padding(self):
        # Pad to a multiple of 64 bytes and append the message bit length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand a 64-byte block into 80 32-bit words.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A82_7999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9_EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1B_BCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62_C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFF_FFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFF_FFFF,
                self.h[1] + b & 0xFFFF_FFFF,
                self.h[2] + c & 0xFFFF_FFFF,
                self.h[3] + d & 0xFFFF_FFFF,
                self.h[4] + e & 0xFFFF_FFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    data = b"Test String"
    assert SHAaHash(data).final_hash() == hashlib.sha1(data).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    hash_input = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHAaHash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 127
| 1
|
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class OneStepUNetPipeline(DiffusionPipeline):  # descriptive name assumed; the original used a placeholder
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        # By construction the result is a tensor of ones; the lines above only
        # exercise one UNet forward pass and one scheduler step.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
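# Usage sketch (assumed toy sizes; both models are randomly initialized, and
# sample_size must be set so the pipeline can build its input tensor):
# from diffusers import UNet2DModel, DDPMScheduler
# pipe = OneStepUNetPipeline(UNet2DModel(sample_size=32), DDPMScheduler())
# out = pipe()  # tensor of ones shaped (1, C, 32, 32)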
| 195
|
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Take items in descending key order while they still fit the budget.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
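# Usage sketch (menu data is assumed, purely illustrative): greedily pick the
# highest-value items that fit a 100-unit weight budget.
foods = build_menu(["Burger", "Pizza", "Cola", "Rice"], [80, 100, 30, 50], [40, 60, 10, 70])
taken, value_sum = greedy(foods, 100, Things.get_value)
print(taken, value_sum)  # Pizza and Burger fit; total value 180.0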
| 195
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :str =logging.get_logger(__name__)
__snake_case :int ={
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4_544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
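# Quick usage sketch: the derived properties follow directly from the
# constructor arguments.
config = FalconConfig(hidden_size=256, num_attention_heads=4)
print(config.head_dim)  # 64
print(config.rotary)    # True, since alibi defaults to False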
| 106
|
"""simple docstring"""
import os
def solution():
    script_directory = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_directory, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
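# Hand-checkable sketch of the same bottom-up rule on a made-up triangle:
# each cell absorbs the larger of its two parents, so the best top-to-bottom
# path sum (3 -> 7 -> 4 = 14 here) surfaces in the last row.
tiny = [[3], [7, 4], [2, 4, 6]]
for i in range(1, len(tiny)):
    for j in range(len(tiny[i])):
        parent_right = tiny[i - 1][j] if j != len(tiny[i - 1]) else 0
        parent_left = tiny[i - 1][j - 1] if j > 0 else 0
        tiny[i][j] += max(parent_right, parent_left)
assert max(tiny[-1]) == 14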
| 142
| 0
|
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
_A = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
_A = parser.parse_args()
_A = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'{args.num} is probably prime')
else:
_A = args.num // divisor
print(f'{args.num} = {divisor} * {quotient}')
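# Lightweight sanity sketch (assumed checks, separate from the CLI above): any
# factor returned must divide the input, and a prime such as 13 yields None
# because gcd(hare - tortoise, p) can only ever be 1 or p.
for probe in (8_051, 10_403, 13):
    found = pollard_rho(probe)
    assert found is None or probe % found == 0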
| 705
|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_A = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
_A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
@classmethod
def __magic_name__ ( cls ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
a_ = tempfile.mkdtemp()
a_ = os.path.join(cls.tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
a_ = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def __magic_name__ ( cls ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 100 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 42 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
a_ = 7 if get_gpu_count() > 1 else 2
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertLess(result["""train_loss"""] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 28 )
self.assertGreaterEqual(result["""eval_exact"""] , 28 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """translation_no_trainer""" ) ) )
@slow
def __magic_name__ ( self ):
a_ = logging.StreamHandler(sys.stdout )
logger.addHandler(_SCREAMING_SNAKE_CASE )
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.1_0 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
# The untrained base model scores about 25%; after training we expect at least 60% accuracy.
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """image_classification_no_trainer""" ) ) )
| 403
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Dict = logging.get_logger(__name__)
lowercase : List[str] = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def rename_key(dct, old_key, new_key):
    val = dct.pop(old_key)
    dct[new_key] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
lowercase : Any = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 542
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = StableDiffusionInstructPixaPixPipeline
UpperCAmelCase_ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
UpperCAmelCase_ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase_ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
A : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
A : Any = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
torch.manual_seed(0 )
A : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
A : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
A : Dict = CLIPTextModel(__UpperCAmelCase )
A : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A : int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Dict:
A : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
A : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A : Optional[Any] = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' )
if str(__UpperCAmelCase ).startswith('''mps''' ):
A : str = torch.manual_seed(__UpperCAmelCase )
else:
A : Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
A : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def snake_case ( self ) -> Union[str, Any]:
A : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A : Dict = self.get_dummy_components()
A : str = StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase )
A : Any = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase )
A : int = sd_pipe(**__UpperCAmelCase ).images
A : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : Optional[Any] = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self ) -> int:
A : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A : Optional[Any] = self.get_dummy_components()
A : Any = StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase )
A : str = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : str = self.get_dummy_inputs(__UpperCAmelCase )
A : Optional[int] = '''french fries'''
A : Any = sd_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
A : Optional[int] = output.images
A : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self ) -> Tuple:
A : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A : List[Any] = self.get_dummy_components()
A : Optional[int] = StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase )
A : Tuple = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : str = self.get_dummy_inputs(__UpperCAmelCase )
A : int = [inputs['''prompt''']] * 2
A : List[str] = np.array(inputs['''image'''] ).astype(np.floataa ) / 2_5_5.0
A : Any = torch.from_numpy(__UpperCAmelCase ).unsqueeze(0 ).to(__UpperCAmelCase )
A : Union[str, Any] = image / 2 + 0.5
A : str = image.permute(0 , 3 , 1 , 2 )
A : List[str] = image.repeat(2 , 1 , 1 , 1 )
A : Union[str, Any] = sd_pipe(**__UpperCAmelCase ).images
A : int = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
A : Any = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self ) -> Dict:
A : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A : List[Any] = self.get_dummy_components()
A : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' )
A : int = StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase )
A : Tuple = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Tuple = self.get_dummy_inputs(__UpperCAmelCase )
A : Optional[Any] = sd_pipe(**__UpperCAmelCase ).images
A : Dict = image[0, -3:, -3:, -1]
A : Optional[int] = [round(__UpperCAmelCase , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(__UpperCAmelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
A : Dict = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def snake_case ( self ) -> Union[str, Any]:
A : int = self.get_dummy_components()
A : int = StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase )
A : Any = VaeImageProcessor(do_resize=__UpperCAmelCase , do_normalize=__UpperCAmelCase )
A : Optional[Any] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Union[str, Any] = pipe(**self.get_dummy_inputs_by_type(__UpperCAmelCase , input_image_type='''pt''' ) )[0]
A : Optional[Any] = components['''vae''']
A : Dict = self.get_dummy_inputs_by_type(__UpperCAmelCase , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
A : Optional[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
A : List[str] = pipe(**__UpperCAmelCase )[0]
A : Any = np.abs(out - out_latents_inputs ).max()
self.assertLess(__UpperCAmelCase , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self , __UpperCAmelCase=0 ) -> Tuple:
A : List[str] = torch.manual_seed(__UpperCAmelCase )
A : List[str] = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
A : Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case ( self ) -> Optional[int]:
A : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
A : Optional[int] = self.get_inputs()
A : Any = pipe(**__UpperCAmelCase ).images
A : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A : Dict = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case ( self ) -> Dict:
A : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCAmelCase )
A : List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
A : Union[str, Any] = self.get_inputs()
A : Union[str, Any] = pipe(**__UpperCAmelCase ).images
A : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A : Dict = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case ( self ) -> List[Any]:
A : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCAmelCase )
A : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
A : str = self.get_inputs()
A : List[Any] = pipe(**__UpperCAmelCase ).images
A : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A : Any = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case ( self ) -> Optional[Any]:
A : Optional[Any] = 0
def callback_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> None:
A : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A : Union[str, Any] = latents[0, -3:, -3:, -1]
A : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
A : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A : Optional[int] = latents[0, -3:, -3:, -1]
A : str = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
A : Optional[Any] = False
A : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa )
A : List[str] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
A : List[Any] = self.get_inputs()
pipe(**__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def snake_case ( self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa )
A : Dict = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A : Optional[Any] = self.get_inputs()
A : str = pipe(**__UpperCAmelCase )
A : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def snake_case ( self ) -> int:
A : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
A : Union[str, Any] = inputs['''image'''].resize((5_04, 5_04) )
A : str = '''timbrooks/instruct-pix2pix'''
A : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCAmelCase , safety_checker=__UpperCAmelCase , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
A : Tuple = pipe(**__UpperCAmelCase )
A : List[str] = output.images[0]
A : List[Any] = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
A : Any = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 542
| 1
|
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    # Repeat the key until it is as long as the message.
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    # Encrypt: shift each letter back by the matching key letter (mod 26).
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    # Decrypt: shift each letter forward by the matching key letter (mod 26).
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
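# Round-trip sketch (assumed example message): decryption inverts encryption
# for any uppercase text, since (m - k) + k == m modulo 26.
demo_key = generate_key("ATTACK AT DAWN", "LEMON")
encrypted = cipher_text("ATTACK AT DAWN", demo_key)
assert original_text(encrypted, demo_key) == "ATTACK AT DAWN"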
| 522
|
from __future__ import annotations
__a : str = """Muhammad Umer Farooq"""
__a : Optional[Any] = """MIT"""
__a : int = """1.0.0"""
__a : Optional[int] = """Muhammad Umer Farooq"""
__a : Dict = """contact@muhammadumerfarooq.me"""
__a : Optional[Any] = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined and is neither empty nor just '#', keep it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
__a : Union[str, Any] = emails_from_url("""https://github.com""")
print(F'''{len(emails)} emails found:''')
print("""\n""".join(sorted(emails)))
| 522
| 1
|
'''simple docstring'''
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """simple docstring"""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
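# Worked check (assumed figures): borrowing 25,000 at 8% per annum over 3 years
# comes to roughly 783.41 per month.
print(round(equated_monthly_installments(25_000, 0.08, 3), 2))  # 783.41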
| 561
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 561
| 1
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 719
|
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    # Computes u * (u - 1) * ... * (u - p + 1) for the forward-difference terms.
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
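# Non-interactive sketch of the same method (sample points are assumed):
# interpolate at 1896 from equally spaced data using forward differences.
x = [1891, 1901, 1911, 1921]
y = [[46.0, 0, 0, 0], [66.0, 0, 0, 0], [81.0, 0, 0, 0], [93.0, 0, 0, 0]]
n = len(x)
for i in range(1, n):
    for j in range(n - i):
        y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
value = 1896
u = (value - x[0]) / (x[1] - x[0])
summ = y[0][0]
for i in range(1, n):
    summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
print(summ)  # ≈ 56.75, between the 1891 and 1901 samples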
| 689
| 0
|
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    # Erf-based GELU, matching the original Google BERT implementation.
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    # Tanh approximation of GELU (https://arxiv.org/abs/1606.08415).
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044_715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044_715, x.dtype)
    coeff2 = tf.cast(0.7_978_845_608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    # GELU with output clipped to [-10, 10], useful for quantization.
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    # Gated Linear Unit: split the input in two along `axis` and gate one half.
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
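# Usage sketch: look up an activation by name and apply it to a small tensor.
act = get_tf_activation("gelu_10")
print(act(tf.constant([-1.0, 0.0, 100.0])))  # ≈ [-0.159, 0.0, 10.0]; large inputs clip at 10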
| 542
|
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowercase : Dict = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __lowercase ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase_ : Optional[datasets.Features] = None
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , ):
import pyspark
def generate_fn():
A : Dict = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
A : Optional[Any] = df_with_partition_id.select('''*''' ).where(F'part_id = {partition_id}' ).drop('''part_id''' )
A : Dict = partition_df.collect()
A : List[str] = 0
for row in rows:
yield F'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class __lowercase ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , ) -> List[str]:
A : List[Any] = df
A : List[str] = partition_order or range(self.df.rdd.getNumPartitions() )
A : Optional[int] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ) -> str:
yield from self.generate_examples_fn()
def snake_case ( self , __UpperCAmelCase ) -> "SparkExamplesIterable":
A : Tuple = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__UpperCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ) -> "SparkExamplesIterable":
A : Tuple = self.split_shard_indices_by_worker(__UpperCAmelCase , __UpperCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__UpperCAmelCase )
@property
def snake_case ( self ) -> int:
return len(self.partition_order )
class __lowercase ( datasets.DatasetBuilder ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = SparkConfig
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]:
import pyspark
A : Tuple = pyspark.sql.SparkSession.builder.getOrCreate()
A : List[Any] = df
A : List[Any] = working_dir
super().__init__(
cache_dir=__UpperCAmelCase , config_name=str(self.df.semanticHash() ) , **__UpperCAmelCase , )
def snake_case ( self ) -> Optional[Any]:
# Returns the path of the created file.
def create_cache_and_write_probe(__UpperCAmelCase ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__UpperCAmelCase )
A : Optional[Any] = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__UpperCAmelCase , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
probe = (
self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
)
if os.path.isfile(probe[0]):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir")
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def _repartition_df_if_needed(self, max_shard_size):
import pyspark
def get_arrow_batch_size(it):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
df_num_rows = self.df.count()
sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
approx_bytes_per_row = (
self.df.limit(sample_num_rows)
.repartition(1)
.mapInArrow(get_arrow_batch_size, "batch_bytes: long")
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
approx_total_size = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
self.df = self.df.repartition(new_num_partitions)
def _prepare_split_single(self, fpath, file_format, max_shard_size, ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
embed_local_files = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
features = self.config.features
writer_batch_size = self._writer_batch_size
storage_options = self._fs.storage_options
def write_arrow(it):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
task_id = pyspark.TaskContext().taskAttemptId()
first_batch = next(it, None)
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id = 0
writer = writer_class(
features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
table = pa.Table.from_batches([first_batch])
writer.write_table(table)
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )
shard_id += 1
writer = writer_class(
features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
table = pa.Table.from_batches([batch])
writer.write_table(table)
if writer._num_bytes > 0:
num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(working_fpath)):
dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
shutil.move(file, dest)
stats = (
self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = "arrow" , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> List[str]:
self._validate_cache_dir()
A : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__UpperCAmelCase )
A : Any = not is_remote_filesystem(self._fs )
A : Union[str, Any] = os.path.join if is_local else posixpath.join
A : Any = '''-TTTTT-SSSSS-of-NNNNN'''
A : Any = f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
A : List[Any] = path_join(self._output_dir , __UpperCAmelCase )
A : Union[str, Any] = 0
A : Any = 0
A : Tuple = 0
A : Union[str, Any] = []
A : List[str] = []
for task_id, content in self._prepare_split_single(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
(
(
A
) , (
A
) , (
A
) , (
A
) ,
) : List[Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(shard_lengths)
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f'Renaming {total_shards} shards.' )
if total_shards > 1:
split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
fs = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
task_id, shard_id, global_shard_id, ):
rename(
fs, fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), )
args = []
global_shard_id = 0
for i in range(len(task_id_and_num_shards)):
task_id, num_shards = task_id_and_num_shards[i]
for shard_id in range(num_shards):
args.append([task_id, shard_id, global_shard_id])
global_shard_id += 1
self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
else:
# don't use any pattern
shard_id = 0
task_id = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace(SUFFIX, ""), )
def _get_examples_iterable_for_split(self, split_generator) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df )
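# A minimal usage sketch (hypothetical DataFrame; assumes a live SparkSession
# `spark` and the public `Dataset.from_spark` entry point that wraps this
# builder):
#
#   from datasets import Dataset
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = Dataset.from_spark(df)  # materializes the DataFrame as Arrow shards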
| 542
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_A = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
def setUp(self):
self.transformer_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
check_copies.TRANSFORMER_PATH = self.transformer_dir
shutil.copy(
os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"), os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"), )
def tearDown(self):
check_copies.TRANSFORMER_PATH = "src/transformers"
shutil.rmtree(self.transformer_dir)
def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
code = black.format_str(code, mode=mode)
fname = os.path.join(self.transformer_dir, "new_code.py")
with open(fname, "w", newline="\n") as f:
f.write(code)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
else:
check_copies.is_copy_consistent(f.name, overwrite=True)
with open(fname, "r") as f:
self.assertEqual(f.read(), expected)
def test_find_code_in_transformers(self):
code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
self.assertEqual(code, REFERENCE_CODE)
def test_is_copy_consistent(self):
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE + "\n", )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE, )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", re.sub("Bert", "TestModel", REFERENCE_CODE), )
# Copy consistency with a really long name
long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", f"{long_class_name}LMPredictionHead", re.sub("Bert", long_class_name, REFERENCE_CODE), )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", REFERENCE_CODE, overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE), )
def test_convert_to_localized_md(self):
localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
md_list, localized_md_list, localized_readme["format_model_list"])
self.assertFalse(num_models_equal)
self.assertEqual(converted_md_list, converted_md_list_sample)
num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
md_list, converted_md_list, localized_readme["format_model_list"])
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(num_models_equal)
link_changed_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
link_unchanged_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"])
# Check if the model link is synchronized.
self.assertEqual(converted_md_list, converted_md_list_sample)
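# For reference, the marker this suite exercises looks like the following
# (hypothetical class names): the checker re-generates the body from the
# source path named after "Copied from" and flags or overwrites any drift.
#
#   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
#   class TestModelLMPredictionHead(nn.Module):
#       ...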
| 717
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
pipeline_class = StableUnCLIPPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
test_xformers_attention = False
def get_dummy_components(self):
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0)
prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
prior_text_encoder = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0)
prior = PriorTransformer(
num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )
torch.manual_seed(0)
prior_scheduler = DDPMScheduler(
variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )
# regular denoising components
torch.manual_seed(0)
image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
torch.manual_seed(0)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
text_encoder = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0)
unet = UNet2DConditionModel(
sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
torch.manual_seed(0)
scheduler = DDIMScheduler(
beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )
torch.manual_seed(0)
vae = AutoencoderKL()
components = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
def test_inference_batch_single_identical(self):
test_max_difference = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_unclip(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy")
pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe("anime turtle", generator=generator, output_type="np")
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image, expected_image)
def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_ = pipe(
"anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np", )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
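# Note on the memory savings used above: `enable_sequential_cpu_offload`
# keeps only the submodule currently executing on the GPU (trading speed for
# the <7 GB peak asserted here), and `enable_attention_slicing` splits the
# attention computation into sequential chunks.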
| 279
| 0
|
"""Ideal gas law and molarity conversion helpers (R = 0.0821 L*atm/(mol*K))."""
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
"""Normality = molarity * n-factor."""
return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
"""Ideal gas law: P = nRT / V."""
return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
"""Ideal gas law: V = nRT / P."""
return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
"""Ideal gas law: T = PV / (nR)."""
return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
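# A quick usage sketch (illustrative values; pressure in atm, volume in
# litres, temperature in kelvin, as implied by the 0.0821 gas constant):
#
#   moles_to_pressure(volume=0.82, moles=3, temperature=300)              # -> 90
#   pressure_and_volume_to_temperature(pressure=2, moles=1, volume=20.5)  # -> 499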
| 69
|
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class Node(Generic[KT, VT]):
def __init__(self, key: KT | str = "root", value: VT | None = None):
self.key = key
self.value = value
self.forward: list[Node[KT, VT]] = []
def __repr__(self) -> str:
return f"Node({self.key}: {self.value})"
@property
def level(self) -> int:
"""Number of forward references, i.e. the height of this node's tower."""
return len(self.forward)
class SkipList(Generic[KT, VT]):
def __init__(self, p: float = 0.5, max_level: int = 16):
self.head: Node[KT, VT] = Node[KT, VT]()
self.level = 0
self.p = p
self.max_level = max_level
def __str__(self) -> str:
items = list(self)
if len(items) == 0:
return f"SkipList(level={self.level})"
label_size = max((len(str(item)) for item in items), default=4)
label_size = max(label_size, 4) + 4
node = self.head
lines = []
forwards = node.forward.copy()
lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(node.forward))
lines.append(" " * label_size + "| " * len(node.forward))
while len(node.forward) != 0:
node = node.forward[0]
lines.append(
f"[{node.key}]".ljust(label_size, "-")
+ " ".join(str(n.key) if n.key == node.key else "|" for n in forwards))
lines.append(" " * label_size + "| " * len(node.forward))
forwards = node.forward
lines.append("None".ljust(label_size) + "* " * len(node.forward))
return f"SkipList(level={self.level})\n" + "\n".join(lines)
def __iter__(self):
node = self.head
while len(node.forward) != 0:
yield node.forward[0].key
node = node.forward[0]
def random_level(self) -> int:
"""Draw a tower height: grow with probability `p`, capped at `max_level`."""
level = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
update_vector = []
node = self.head
for i in reversed(range(self.level)):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
#                             or equal to searched key would result
#                             in skipping searched key.
while i < node.level and node.forward[i].key < key:
node = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(node)
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def delete(self, key: KT):
node, update_vector = self._locate_node(key)
if node is not None:
for i, update_node in enumerate(update_vector):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
update_node.forward[i] = node.forward[i]
else:
update_node.forward = update_node.forward[:i]
def insert(self, key: KT, value: VT):
node, update_vector = self._locate_node(key)
if node is not None:
node.value = value
else:
level = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1, level):
update_vector.append(self.head)
self.level = level
new_node = Node(key, value)
for i, update_node in enumerate(update_vector[:level]):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i])
if update_node.level < i + 1:
update_node.forward.append(new_node)
else:
update_node.forward[i] = new_node
def find(self, key: VT) -> VT | None:
node, _ = self._locate_node(key)
if node is not None:
return node.value
return None
def test_insert():
skip_list = SkipList()
skip_list.insert("Key1", 3)
skip_list.insert("Key2", 12)
skip_list.insert("Key3", 41)
skip_list.insert("Key4", -19)
node = skip_list.head
all_values = {}
while node.level != 0:
node = node.forward[0]
all_values[node.key] = node.value
assert len(all_values) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def test_insert_overrides_existing_value():
skip_list = SkipList()
skip_list.insert("Key1", 10)
skip_list.insert("Key1", 12)
skip_list.insert("Key5", 7)
skip_list.insert("Key7", 10)
skip_list.insert("Key10", 5)
skip_list.insert("Key7", 7)
skip_list.insert("Key5", 5)
skip_list.insert("Key10", 10)
node = skip_list.head
all_values = {}
while node.level != 0:
node = node.forward[0]
all_values[node.key] = node.value
if len(all_values) != 4:
print()
assert len(all_values) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def test_searching_empty_list_returns_none():
skip_list = SkipList()
assert skip_list.find("Some key") is None
def test_search():
skip_list = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def test_deleting_item_from_empty_list_do_nothing():
skip_list = SkipList()
skip_list.delete("Some key")
assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
skip_list = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def test_delete_removes_only_given_key():
skip_list = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def test_delete_doesnt_leave_dead_nodes():
skip_list = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(node):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(forward_node)
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values():
def is_sorted(lst):
return all(next_item >= item for item, next_item in zip(lst, lst[1:]))
skip_list = SkipList()
for i in range(10):
skip_list.insert(i, i)
assert is_sorted(list(skip_list))
skip_list.delete(5)
skip_list.delete(8)
skip_list.delete(2)
assert is_sorted(list(skip_list))
skip_list.insert(-12, -12)
skip_list.insert(77, 77)
assert is_sorted(list(skip_list))
def pytests():
for _ in range(100):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main():
"""Demo: build a small list with duplicate keys, delete one key and print it."""
skip_list = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
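# Quick usage sketch: insert/find/delete all run in O(log n) expected time
# thanks to the probabilistic towers of forward pointers.
#
#   sl = SkipList[str, int]()
#   sl.insert("a", 1)
#   assert sl.find("a") == 1
#   sl.delete("a")
#   assert sl.find("a") is None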
| 58
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22PriorEmb2EmbPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
params = ["image_embeds", "negative_image_embeds", "image", "hint"]
batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
test_xformers_attention = False
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 100
@property
def dummy_unet(self):
torch.manual_seed(0)
model_kwargs = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_movq_kwargs(self):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def dummy_movq(self):
torch.manual_seed(0)
model = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components(self):
unet = self.dummy_unet
movq = self.dummy_movq
ddim_config = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
scheduler = DDIMScheduler(**ddim_config)
components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs(self, device, seed=0):
image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
device)
# create init_image
image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
# create hint
hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def test_kandinsky_controlnet_img2img(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device), return_dict=False, )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_controlnet_img2img(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png")
init_image = init_image.resize((512, 512))
hint = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
hint = torch.from_numpy(np.array(hint)).float() / 255.0
hint = hint.permute(2, 0, 1).unsqueeze(0)
prompt = "A robot, 4k photo"
pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
pipe_prior.to(torch_device)
pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="", ).to_tuple()
output = pipeline(
image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type="np", )
image = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(image, expected_image)
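# The integration test above exercises the two-stage Kandinsky 2.2 flow: the
# prior pipeline turns the prompt (and optionally an init image) into CLIP
# image embeddings, and the controlnet img2img decoder renders pixels from
# those embeddings under the depth-hint guidance.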
| 715
|
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
"Format `t` (in seconds) to (h):mm:ss"
t = int(t)
h, m, s = t // 3_600, (t // 60) % 60, t % 60
return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
"Html code for a progress bar `value`/`total` with `label` on the right, `prefix` on the left."
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table(items):
"Put the texts in `items` in an HTML table."
html_code = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class NotebookProgressBar:
warmup = 5
update_every = 0.2
def __init__(self, total: int, prefix: Optional[str] = None, leave: bool = True, parent: Optional["NotebookTrainingTracker"] = None, width: int = 300, ):
self.total = total
self.prefix = "" if prefix is None else prefix
self.leave = leave
self.parent = parent
self.width = width
self.last_value = None
self.comment = None
self.output = None
def update(self, value: int, force_update: bool = False, comment: str = None):
self.value = value
if comment is not None:
self.comment = comment
if self.last_value is None:
self.start_time = self.last_time = time.time()
self.start_value = self.last_value = value
self.elapsed_time = self.predicted_remaining = None
self.first_calls = self.warmup
self.wait_for = 1
self.update_bar(value)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
current_time = time.time()
self.elapsed_time = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
self.average_time_per_item = self.elapsed_time / (value - self.start_value)
else:
self.average_time_per_item = None
if value >= self.total:
value = self.total
self.predicted_remaining = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
self.predicted_remaining = self.average_time_per_item * (self.total - value)
self.update_bar(value)
self.last_value = value
self.last_time = current_time
if self.average_time_per_item is None:
self.wait_for = 1
else:
self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
def update_bar(self, value, comment=None):
spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
if self.elapsed_time is None:
self.label = f"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
else:
self.label = (
f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
f" {format_time(self.predicted_remaining)}"
)
self.label += f", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
self.display()
def display(self):
self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
self.output = disp.display(disp.HTML(self.html_code), display_id=True)
else:
self.output.update(disp.HTML(self.html_code ) )
def close(self):
"Close the progress bar."
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class NotebookTrainingTracker(NotebookProgressBar):
"An extension of `NotebookProgressBar` that keeps an inner HTML table of metrics."
def __init__(self, num_steps, column_names=None):
super().__init__(num_steps)
self.inner_table = None if column_names is None else [column_names]
self.child_bar = None
def display(self):
self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
self.output = disp.display(disp.HTML(self.html_code), display_id=True)
else:
self.output.update(disp.HTML(self.html_code ) )
def write_line(self, values):
"Write the `values` dict as a new line in the inner table."
if self.inner_table is None:
self.inner_table = [list(values.keys()), list(values.values())]
else:
columns = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(key)
self.inner_table[0] = columns
self.inner_table.append([values[c] for c in columns])
def add_child(self, total, prefix=None, width=300):
self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
return self.child_bar
def remove_child(self):
self.child_bar = None
self.display()
class NotebookProgressCallback(TrainerCallback):
"A `TrainerCallback` that displays the progress of training or evaluation in a Jupyter notebook."
def __init__(self):
self.training_tracker = None
self.prediction_bar = None
self._force_next_update = False
def on_train_begin(self, args, state, control, **kwargs):
self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
self.training_loss = 0
self.last_log = 0
column_names = [self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss")
self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
def on_step_end(self, args, state, control, **kwargs):
epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1, comment=f"Epoch {epoch}/{state.num_train_epochs}", force_update=self._force_next_update, )
self._force_next_update = False
def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
if not has_length(eval_dataloader):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
else:
self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def on_predict(self, args, state, control, **kwargs):
if self.prediction_bar is not None:
self.prediction_bar.close()
self.prediction_bar = None
def on_log(self, args, state, control, logs=None, **kwargs):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
values = {"Training Loss": logs["loss"]}
# First column is necessarily Step since we're not in epoch eval strategy
values["Step"] = state.global_step
self.training_tracker.write_line(values)
def on_evaluate(self, args, state, control, metrics=None, **kwargs):
if self.training_tracker is not None:
values = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history):
if "loss" in log:
values["Training Loss"] = log["loss"]
break
if self.first_column == "Epoch":
values["Epoch"] = int(state.epoch)
else:
values["Step"] = state.global_step
metric_key_prefix = "eval"
for k in metrics:
if k.endswith("_loss"):
metric_key_prefix = re.sub(r"\_loss$", "", k)
_ = metrics.pop("total_flos", None)
_ = metrics.pop("epoch", None)
_ = metrics.pop(f"{metric_key_prefix}_runtime", None)
_ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
_ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
_ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
for k, v in metrics.items():
if k == f"{metric_key_prefix}_loss":
values["Validation Loss"] = v
else:
splits = k.split("_")
name = " ".join([part.capitalize() for part in splits[1:]])
values[name] = v
self.training_tracker.write_line(values)
self.training_tracker.remove_child()
self.prediction_bar = None
# Evaluation takes a long time so we should force the next update.
self._force_next_update = True
def on_train_end(self, args, state, control, **kwargs):
self.training_tracker.update(
state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True )
self.training_tracker = None
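# A minimal standalone usage sketch (runs in a Jupyter notebook, outside of
# the Trainer):
#
#   import time
#   bar = NotebookProgressBar(100)
#   for step in range(100):
#       bar.update(step + 1, comment=f"step {step}")
#       time.sleep(0.01)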
| 283
| 0
|
def find_minimum_change(denominations, value) -> list[int]:
"""Greedy change-making: take the largest denomination while it still fits."""
total_value = int(value)
# Initialize Result
answer = []
# Traverse through all denominations, largest first
for denomination in reversed(denominations):
# Find denominations
while int(total_value) >= int(denomination):
total_value -= int(denomination)
answer.append(denomination)  # Append to the "answer" array
return answer
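# Note: this greedy strategy assumes `denominations` is sorted in ascending
# order, and it is only optimal for canonical coin systems such as Indian
# currency. Counterexample: find_minimum_change([1, 3, 4], "6") returns
# [4, 1, 1] (three coins), while the optimum is [3, 3] (two coins).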
# Driver Code
if __name__ == "__main__":
denominations = []
value = "0"
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
    n = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
    value = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
    denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
    value = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
    answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 662
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
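# For context, a minimal standalone sketch of the same deferred-import idea via
# module-level __getattr__ (PEP 562); transformers' _LazyModule is more involved
# (it also handles dir(), submodules and error messages), and the mapping below
# is illustrative only. Placed in some package's __init__.py it would postpone
# the heavy imports until an attribute is first accessed:
#
#     import importlib
#
#     _import_structure = {"json": ["dumps", "loads"]}
#     _attr_to_module = {
#         attr: mod for mod, attrs in _import_structure.items() for attr in attrs
#     }
#
#     def __getattr__(name):
#         # Called only for attributes not found by normal lookup (PEP 562).
#         if name in _attr_to_module:
#             module = importlib.import_module(_attr_to_module[name])
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")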
| 662
| 1
|
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
        number //= 1_0_0_0_0_0
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_00_00_00
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # every multiple of `number` below the limit shares the same chain ending
    while number < 1_0_0_0_0_0_0_0:
        CHAINS[number - 1] = number_chain
        number *= 1_0
    return number_chain
def solution(number: int = 1_0_0_0_0_0_0_0) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 256
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
UpperCAmelCase_ :Dict = AudioLDMPipeline
UpperCAmelCase_ :Optional[int] = TEXT_TO_AUDIO_PARAMS
UpperCAmelCase_ :Optional[Any] = TEXT_TO_AUDIO_BATCH_PARAMS
UpperCAmelCase_ :Union[str, Any] = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__A , )
lowerCAmelCase_ :Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :int = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
lowerCAmelCase_ :int = ClapTextModelWithProjection(__A )
lowerCAmelCase_ :Tuple = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
lowerCAmelCase_ :Optional[Any] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__A , )
lowerCAmelCase_ :List[str] = SpeechTaHifiGan(__A )
lowerCAmelCase_ :List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> List[Any]:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :int = torch.manual_seed(__A )
else:
lowerCAmelCase_ :int = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :List[str] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ :Optional[Any] = self.get_dummy_components()
lowerCAmelCase_ :Tuple = AudioLDMPipeline(**__A )
lowerCAmelCase_ :int = audioldm_pipe.to(__A )
audioldm_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :int = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Any = audioldm_pipe(**__A )
lowerCAmelCase_ :Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(__A ) == 256
lowerCAmelCase_ :Optional[int] = audio[:10]
lowerCAmelCase_ :str = np.array(
[-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Any = self.get_dummy_components()
lowerCAmelCase_ :str = AudioLDMPipeline(**__A )
lowerCAmelCase_ :Optional[Any] = audioldm_pipe.to(__A )
lowerCAmelCase_ :List[Any] = audioldm_pipe.to(__A )
audioldm_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Tuple = 3 * [inputs["""prompt"""]]
# forward
lowerCAmelCase_ :Dict = audioldm_pipe(**__A )
lowerCAmelCase_ :Optional[Any] = output.audios[0]
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = 3 * [inputs.pop("""prompt""" )]
lowerCAmelCase_ :Union[str, Any] = audioldm_pipe.tokenizer(
__A , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__A , return_tensors="""pt""" , )
lowerCAmelCase_ :str = text_inputs["""input_ids"""].to(__A )
lowerCAmelCase_ :List[str] = audioldm_pipe.text_encoder(
__A , )
lowerCAmelCase_ :Tuple = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCAmelCase_ :Any = F.normalize(__A , dim=-1 )
lowerCAmelCase_ :Tuple = prompt_embeds
# forward
lowerCAmelCase_ :Tuple = audioldm_pipe(**__A )
lowerCAmelCase_ :int = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Optional[Any] = self.get_dummy_components()
lowerCAmelCase_ :List[Any] = AudioLDMPipeline(**__A )
lowerCAmelCase_ :str = audioldm_pipe.to(__A )
lowerCAmelCase_ :Union[str, Any] = audioldm_pipe.to(__A )
audioldm_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[Any] = 3 * ["""this is a negative prompt"""]
lowerCAmelCase_ :str = negative_prompt
lowerCAmelCase_ :Optional[Any] = 3 * [inputs["""prompt"""]]
# forward
lowerCAmelCase_ :Dict = audioldm_pipe(**__A )
lowerCAmelCase_ :Tuple = output.audios[0]
lowerCAmelCase_ :int = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = 3 * [inputs.pop("""prompt""" )]
lowerCAmelCase_ :Any = []
for p in [prompt, negative_prompt]:
lowerCAmelCase_ :Any = audioldm_pipe.tokenizer(
__A , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__A , return_tensors="""pt""" , )
lowerCAmelCase_ :Tuple = text_inputs["""input_ids"""].to(__A )
lowerCAmelCase_ :Optional[Any] = audioldm_pipe.text_encoder(
__A , )
lowerCAmelCase_ :Dict = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCAmelCase_ :str = F.normalize(__A , dim=-1 )
embeds.append(__A )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = embeds
# forward
lowerCAmelCase_ :Tuple = audioldm_pipe(**__A )
lowerCAmelCase_ :int = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :int = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ :str = self.get_dummy_components()
lowerCAmelCase_ :Dict = PNDMScheduler(skip_prk_steps=__A )
lowerCAmelCase_ :Dict = AudioLDMPipeline(**__A )
lowerCAmelCase_ :int = audioldm_pipe.to(__A )
audioldm_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = """egg cracking"""
lowerCAmelCase_ :Tuple = audioldm_pipe(**__A , negative_prompt=__A )
lowerCAmelCase_ :Dict = output.audios[0]
assert audio.ndim == 1
assert len(__A ) == 256
lowerCAmelCase_ :List[Any] = audio[:10]
lowerCAmelCase_ :Tuple = np.array(
[-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ :Optional[int] = self.get_dummy_components()
lowerCAmelCase_ :List[Any] = PNDMScheduler(skip_prk_steps=__A )
lowerCAmelCase_ :Union[str, Any] = AudioLDMPipeline(**__A )
lowerCAmelCase_ :Optional[Any] = audioldm_pipe.to(__A )
audioldm_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Tuple = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
lowerCAmelCase_ :int = audioldm_pipe(__A , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowerCAmelCase_ :List[str] = 2
lowerCAmelCase_ :Optional[int] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
lowerCAmelCase_ :List[Any] = 2
lowerCAmelCase_ :Dict = audioldm_pipe(__A , num_inference_steps=2 , num_waveforms_per_prompt=__A ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
lowerCAmelCase_ :str = 2
lowerCAmelCase_ :str = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__A ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ :List[str] = self.get_dummy_components()
lowerCAmelCase_ :List[Any] = AudioLDMPipeline(**__A )
lowerCAmelCase_ :Union[str, Any] = audioldm_pipe.to(__A )
audioldm_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
lowerCAmelCase_ :int = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = audioldm_pipe(audio_length_in_s=0.0_1_6 , **__A )
lowerCAmelCase_ :Dict = output.audios[0]
assert audio.ndim == 1
assert len(__A ) / vocoder_sampling_rate == 0.0_1_6
lowerCAmelCase_ :List[str] = audioldm_pipe(audio_length_in_s=0.0_3_2 , **__A )
lowerCAmelCase_ :Any = output.audios[0]
assert audio.ndim == 1
assert len(__A ) / vocoder_sampling_rate == 0.0_3_2
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Any = self.get_dummy_components()
lowerCAmelCase_ :List[str] = AudioLDMPipeline(**__A )
lowerCAmelCase_ :Dict = audioldm_pipe.to(__A )
audioldm_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Tuple = ["""hey"""]
lowerCAmelCase_ :Any = audioldm_pipe(__A , num_inference_steps=1 )
lowerCAmelCase_ :List[Any] = output.audios.shape
assert audio_shape == (1, 256)
lowerCAmelCase_ :Optional[Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowerCAmelCase_ :Optional[int] = SpeechTaHifiGan(__A ).to(__A )
lowerCAmelCase_ :Optional[int] = audioldm_pipe(__A , num_inference_steps=1 )
lowerCAmelCase_ :str = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__A )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(test_mean_pixel_difference=__A )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__A )
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
def __lowerCAmelCase ( self ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self , __A , __A="cpu" , __A=torch.floataa , __A=0 ) -> str:
lowerCAmelCase_ :Tuple = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :Tuple = np.random.RandomState(__A ).standard_normal((1, 8, 128, 16) )
lowerCAmelCase_ :Optional[Any] = torch.from_numpy(__A ).to(device=__A , dtype=__A )
lowerCAmelCase_ :Any = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Optional[int] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
lowerCAmelCase_ :Optional[Any] = audioldm_pipe.to(__A )
audioldm_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Tuple = self.get_inputs(__A )
lowerCAmelCase_ :Any = 25
lowerCAmelCase_ :Optional[Any] = audioldm_pipe(**__A ).audios[0]
assert audio.ndim == 1
assert len(__A ) == 8_1920
lowerCAmelCase_ :List[Any] = audio[7_7230:7_7240]
lowerCAmelCase_ :List[str] = np.array(
[-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5] )
lowerCAmelCase_ :Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
lowerCAmelCase_ :List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowerCAmelCase_ :Optional[Any] = audioldm_pipe.to(__A )
audioldm_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Any = self.get_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = audioldm_pipe(**__A ).audios[0]
assert audio.ndim == 1
assert len(__A ) == 8_1920
lowerCAmelCase_ :List[str] = audio[2_7780:2_7790]
lowerCAmelCase_ :str = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2] )
lowerCAmelCase_ :int = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
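# A minimal end-to-end usage sketch mirroring the slow tests above; it assumes
# the public diffusers API, a CUDA device, and that scipy is available for
# writing the waveform (any 16 kHz WAV writer works -- compare the
# SpeechTaHifiGan sampling rate in the dummy components):
#
#     import scipy.io.wavfile
#     import torch
#     from diffusers import AudioLDMPipeline
#
#     pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm", torch_dtype=torch.float16)
#     pipe = pipe.to("cuda")
#     audio = pipe(
#         "A hammer hitting a wooden surface",
#         num_inference_steps=25,
#         guidance_scale=2.5,
#         audio_length_in_s=5.0,
#     ).audios[0]
#     scipy.io.wavfile.write("hammer.wav", rate=16_000, data=audio)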
| 256
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCamelCase = ConsistencyModelPipeline
__UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__UpperCamelCase = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def __lowerCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
a__ : Union[str, Any] = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
return unet
@property
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
a__ : Any = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
return unet
def __lowerCAmelCase ( self : Any , A__ : str=False ) -> Optional[Any]:
'''simple docstring'''
if class_cond:
a__ : List[str] = self.dummy_cond_unet
else:
a__ : Any = self.dummy_uncond_unet
# Default to CM multistep sampler
a__ : int = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=8_0.0 , )
a__ : Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def __lowerCAmelCase ( self : List[Any] , A__ : Tuple , A__ : Dict=0 ) -> List[Any]:
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
a__ : Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
a__ : Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
a__ : Optional[Any] = {
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [2_2, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
a__ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a__ : Dict = self.get_dummy_components()
a__ : Dict = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
a__ : List[str] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a__ : Optional[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
a__ : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
a__ : Tuple = image[0, -3:, -3:, -1]
a__ : List[Any] = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
'''simple docstring'''
a__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a__ : Optional[Any] = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ )
a__ : Tuple = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
a__ : Any = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a__ : Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
a__ : int = 0
a__ : Optional[int] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
a__ : Optional[Any] = image[0, -3:, -3:, -1]
a__ : Tuple = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCAmelCase ( self : int ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a__ : List[str] = self.get_dummy_components()
a__ : Any = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
a__ : List[str] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a__ : str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
a__ : Union[str, Any] = 1
a__ : Union[str, Any] = None
a__ : str = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
a__ : Tuple = image[0, -3:, -3:, -1]
a__ : Dict = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCAmelCase ( self : List[Any] ) -> Any:
'''simple docstring'''
a__ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a__ : Optional[Any] = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ )
a__ : List[Any] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
a__ : str = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a__ : List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
a__ : Optional[Any] = 1
a__ : Optional[int] = None
a__ : Optional[Any] = 0
a__ : Optional[int] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
a__ : List[str] = image[0, -3:, -3:, -1]
a__ : Any = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
"""simple docstring"""
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[str] , A__ : Optional[Any]=0 , A__ : Tuple=False , A__ : Any="cpu" , A__ : Any=torch.floataa , A__ : List[Any]=(1, 3, 6_4, 6_4) ) -> Union[str, Any]:
'''simple docstring'''
a__ : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
a__ : Optional[int] = {
'''num_inference_steps''': None,
'''timesteps''': [2_2, 0],
'''class_labels''': 0,
'''generator''': generator,
'''output_type''': '''np''',
}
if get_fixed_latents:
a__ : Optional[Any] = self.get_fixed_latents(seed=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , shape=SCREAMING_SNAKE_CASE__ )
a__ : List[str] = latents
return inputs
def __lowerCAmelCase ( self : Optional[Any] , A__ : str=0 , A__ : Union[str, Any]="cpu" , A__ : List[str]=torch.floataa , A__ : str=(1, 3, 6_4, 6_4) ) -> Optional[int]:
'''simple docstring'''
if type(SCREAMING_SNAKE_CASE__ ) == str:
a__ : List[str] = torch.device(SCREAMING_SNAKE_CASE__ )
a__ : List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
a__ : List[Any] = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
return latents
def __lowerCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
a__ : List[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
a__ : int = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=8_0.0 , )
a__ : int = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a__ : Any = self.get_inputs()
a__ : List[Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
a__ : List[Any] = image[0, -3:, -3:, -1]
a__ : Dict = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __lowerCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
a__ : Optional[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
a__ : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=8_0.0 , )
a__ : Optional[Any] = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a__ : List[Any] = self.get_inputs()
a__ : Dict = 1
a__ : Optional[Any] = None
a__ : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
a__ : Union[str, Any] = image[0, -3:, -3:, -1]
a__ : Optional[Any] = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
a__ : Any = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
a__ : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=8_0.0 , )
a__ : List[Any] = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a__ : List[Any] = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ , enable_math=SCREAMING_SNAKE_CASE__ , enable_mem_efficient=SCREAMING_SNAKE_CASE__ ):
a__ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
a__ : Union[str, Any] = image[0, -3:, -3:, -1]
a__ : Any = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def __lowerCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
a__ : Optional[int] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
a__ : Dict = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=8_0.0 , )
a__ : str = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a__ : str = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
a__ : str = 1
a__ : int = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ , enable_math=SCREAMING_SNAKE_CASE__ , enable_mem_efficient=SCREAMING_SNAKE_CASE__ ):
a__ : Optional[int] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
a__ : Optional[int] = image[0, -3:, -3:, -1]
a__ : Tuple = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
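# A minimal sampling sketch mirroring the slow tests above; it assumes the
# public diffusers API and a CUDA device (the checkpoint, scheduler settings
# and timesteps are copied from the tests):
#
#     import torch
#     from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel
#
#     unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#     pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
#
#     # Distilled consistency models support single-step generation ...
#     image_onestep = pipe(num_inference_steps=1, class_labels=0).images[0]
#     # ... as well as multistep sampling with explicit timesteps, trading
#     # extra network evaluations for sample quality.
#     image_multistep = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0).images[0]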
| 688
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
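    # A quick sanity check (a sketch): dy/dx = y with y(0) = 1 has the exact
    # solution e**x, so with step size 0.01 the estimate at x = 1 lands near
    # e ~ 2.71828; the global error of the explicit Euler method is O(h).
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])  # ~2.7048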
| 355
| 0
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
'''simple docstring'''
def __init__( self : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int]=3 , lowerCamelCase : List[Any]=32 , lowerCamelCase : List[Any]=3 , lowerCamelCase : List[str]=10 , lowerCamelCase : int=[10, 20, 30, 40] , lowerCamelCase : List[str]=[1, 1, 2, 1] , lowerCamelCase : Tuple=True , lowerCamelCase : Optional[Any]=True , lowerCamelCase : int="relu" , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[Any]=None , )-> Any:
snake_case__ : Union[str, Any] = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Dict = image_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : List[str] = embeddings_size
snake_case__ : List[str] = hidden_sizes
snake_case__ : str = depths
snake_case__ : Any = is_training
snake_case__ : List[Any] = use_labels
snake_case__ : int = hidden_act
snake_case__ : Dict = num_labels
snake_case__ : Dict = scope
snake_case__ : Union[str, Any] = len(lowerCamelCase )
def __lowerCAmelCase ( self : Any )-> List[str]:
snake_case__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[str] = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : str )-> int:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __lowerCAmelCase ( self : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] )-> Optional[int]:
snake_case__ : Optional[int] = TFRegNetModel(config=lowerCamelCase )
snake_case__ : Optional[int] = model(lowerCamelCase , training=lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowerCAmelCase ( self : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] )-> Any:
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : Union[str, Any] = TFRegNetForImageClassification(lowerCamelCase )
snake_case__ : Dict = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : Optional[Any] )-> Any:
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ : Any = config_and_inputs
snake_case__ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_lowercase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_lowercase = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
def __lowerCAmelCase ( self : List[str] )-> Dict:
snake_case__ : List[str] = TFRegNetModelTester(self )
snake_case__ : str = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def __lowerCAmelCase ( self : List[Any] )-> Optional[int]:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def __lowerCAmelCase ( self : int )-> int:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __lowerCAmelCase ( self : List[str] )-> Tuple:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def __lowerCAmelCase ( self : List[str] )-> List[Any]:
pass
def __lowerCAmelCase ( self : Dict )-> Optional[Any]:
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(lowerCamelCase )
snake_case__ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __lowerCAmelCase ( self : Dict )-> str:
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __lowerCAmelCase ( self : Any )-> Any:
def check_hidden_states_output(lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Optional[int] ):
snake_case__ : Dict = model_class(lowerCamelCase )
snake_case__ : Tuple = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
snake_case__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ : str = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Optional[int] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ : Optional[Any] = layer_type
snake_case__ : List[Any] = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __lowerCAmelCase ( self : List[Any] )-> List[str]:
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowerCamelCase : Dict , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : str={} ):
snake_case__ : Any = model(lowerCamelCase , return_dict=lowerCamelCase , **lowerCamelCase )
snake_case__ : Optional[Any] = model(lowerCamelCase , return_dict=lowerCamelCase , **lowerCamelCase ).to_tuple()
def recursive_check(lowerCamelCase : str , lowerCamelCase : Optional[Any] ):
if isinstance(lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCamelCase , lowerCamelCase ):
recursive_check(lowerCamelCase , lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowerCamelCase , lowerCamelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(lowerCamelCase , lowerCamelCase )
for model_class in self.all_model_classes:
snake_case__ : str = model_class(lowerCamelCase )
snake_case__ : Tuple = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : Tuple = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
snake_case__ : int = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : Tuple = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase , {"""output_hidden_states""": True} )
snake_case__ : str = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
snake_case__ : List[Any] = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase , {"""output_hidden_states""": True} )
def __lowerCAmelCase ( self : Optional[int] )-> Union[str, Any]:
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def __lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = TFRegNetModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self : Any )-> List[str]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self : Dict )-> Optional[Any]:
snake_case__ : List[str] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : Any = self.default_image_processor
snake_case__ : List[str] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=lowerCamelCase , return_tensors="""tf""" )
# forward pass
snake_case__ : List[Any] = model(**lowerCamelCase , training=lowerCamelCase )
# verify the logits
snake_case__ : Optional[Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
snake_case__ : int = tf.constant([-0.4_180, -1.5_051, -3.4_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 )
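# A minimal inference sketch following the integration test above; the
# checkpoint name "facebook/regnet-y-040" is an assumption standing in for
# TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]:
#
#     import tensorflow as tf
#     from PIL import Image
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     inputs = processor(images=image, return_tensors="tf")
#     logits = model(**inputs, training=False).logits  # shape (1, 1000)
#     predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
#     print(model.config.id2label[predicted_class])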
| 707
|
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCAmelCase__ = 2
class Dictionary:
'''simple docstring'''
def __init__( self : List[Any] , *, # begin keyword-only arguments
lowerCamelCase : Optional[int]="<s>" , lowerCamelCase : str="<pad>" , lowerCamelCase : str="</s>" , lowerCamelCase : int="<unk>" , lowerCamelCase : Tuple=None , )-> str:
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = bos, unk, pad, eos
snake_case__ : Dict = []
snake_case__ : int = []
snake_case__ : Optional[int] = {}
snake_case__ : int = self.add_symbol(lowerCamelCase )
snake_case__ : Optional[int] = self.add_symbol(lowerCamelCase )
snake_case__ : List[str] = self.add_symbol(lowerCamelCase )
snake_case__ : int = self.add_symbol(lowerCamelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(lowerCamelCase )
snake_case__ : int = len(self.symbols )
def __eq__( self : str , lowerCamelCase : Tuple )-> Optional[Any]:
return self.indices == other.indices
def __getitem__( self : Optional[int] , lowerCamelCase : Any )-> Tuple:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : Any )-> Union[str, Any]:
return len(self.symbols )
def __contains__( self : Tuple , lowerCamelCase : int )-> int:
return sym in self.indices
@classmethod
def __lowerCAmelCase ( cls : Dict , lowerCamelCase : Union[str, Any] )-> str:
snake_case__ : List[str] = cls()
d.add_from_file(lowerCamelCase )
return d
def __lowerCAmelCase ( self : int , lowerCamelCase : int , lowerCamelCase : List[Any]=1 , lowerCamelCase : Union[str, Any]=False )-> Any:
if word in self.indices and not overwrite:
snake_case__ : Union[str, Any] = self.indices[word]
snake_case__ : str = self.count[idx] + n
return idx
else:
snake_case__ : Any = len(self.symbols )
snake_case__ : Optional[int] = idx
self.symbols.append(lowerCamelCase )
self.count.append(lowerCamelCase )
return idx
def __lowerCAmelCase ( self : Any , lowerCamelCase : List[Any] )-> Dict:
return 0
def __lowerCAmelCase ( self : int , lowerCamelCase : str )-> Optional[int]:
if isinstance(lowerCamelCase , lowerCamelCase ):
try:
with open(lowerCamelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(lowerCamelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(lowerCamelCase ) )
return
snake_case__ : Union[str, Any] = f.readlines()
snake_case__ : Optional[Any] = self._load_meta(lowerCamelCase )
for line in lines[indices_start_line:]:
try:
snake_case__ , snake_case__ : Optional[int] = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
snake_case__ : str = True
snake_case__ , snake_case__ : Any = line.rsplit(""" """ , 1 )
else:
snake_case__ : Dict = False
snake_case__ : Optional[int] = int(lowerCamelCase )
snake_case__ : List[str] = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(lowerCamelCase ) )
self.add_symbol(lowerCamelCase , n=lowerCamelCase , overwrite=lowerCamelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def rewrite_dict_keys(d):
    """simple docstring"""
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up
    da = dict(
        (re.sub(R"""@@$""", """""", k), v) if k.endswith("""@@""") else (re.sub(R"""$""", """</w>""", k), v)
        for k, v in d.items()
    )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
"""simple docstring"""
if not os.path.exists(UpperCAmelCase ):
raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
snake_case__ : Tuple = os.path.join(UpperCAmelCase , """checkpoint.pt""" )
if not os.path.isfile(UpperCAmelCase ):
raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
snake_case__ : str = torch.load(UpperCAmelCase , map_location="""cpu""" )
snake_case__ : List[Any] = chkpt["""cfg"""]["""model"""]
# dicts
snake_case__ : Optional[Any] = os.path.join(UpperCAmelCase , """dict.txt""" )
if not os.path.isfile(UpperCAmelCase ):
raise ValueError(f"""path to the file {dict_file} does not exist!""" )
snake_case__ : List[str] = Dictionary.load(UpperCAmelCase )
snake_case__ : Optional[int] = rewrite_dict_keys(src_dict.indices )
snake_case__ : Tuple = len(UpperCAmelCase )
snake_case__ : Optional[Any] = os.path.join(UpperCAmelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) )
# merges_file (bpecodes)
snake_case__ : Union[str, Any] = os.path.join(UpperCAmelCase , """bpecodes""" )
if not os.path.isfile(UpperCAmelCase ):
raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )
snake_case__ : Tuple = os.path.join(UpperCAmelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(UpperCAmelCase , UpperCAmelCase )
# model config
snake_case__ : str = os.path.join(UpperCAmelCase , """config.json""" )
snake_case__ : Dict = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.0_2,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1E-1_2,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(f"""Generating {biogpt_model_config_file}""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) )
# tokenizer config
snake_case__ : int = os.path.join(UpperCAmelCase , UpperCAmelCase )
snake_case__ : List[str] = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(f"""Generating {biogpt_tokenizer_config_file}""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) )
# model
snake_case__ : int = chkpt["""model"""]
# remove unneeded keys
snake_case__ : List[Any] = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(UpperCAmelCase , UpperCAmelCase )
snake_case__ : List[Any] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
snake_case__ : str = model_state_dict.pop(UpperCAmelCase )
else:
snake_case__ : Optional[int] = model_state_dict.pop(UpperCAmelCase )
snake_case__ : Tuple = BioGptConfig.from_pretrained(UpperCAmelCase )
snake_case__ : Optional[int] = BioGptForCausalLM(UpperCAmelCase )
# check that it loads ok
model_new.load_state_dict(UpperCAmelCase )
# save
snake_case__ : Dict = os.path.join(UpperCAmelCase , UpperCAmelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(UpperCAmelCase , UpperCAmelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
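# Hypothetical invocation (the script name and both paths are placeholders);
# the checkpoint directory must contain checkpoint.pt, dict.txt and bpecodes,
# as validated by the checks above:
#
#     python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#         --biogpt_checkpoint_path /path/to/Pre-trained-BioGPT \
#         --pytorch_dump_folder_path /path/to/biogpt-converted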
| 172
| 0
|
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
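# Doctest-style examples for the DP above:
#
#     >>> is_sum_subset([2, 4, 6, 8], 5)
#     False
#     >>> is_sum_subset([2, 4, 6, 8], 14)
#     True
#
# The table costs O(len(arr) * required_sum) time and space; since row i only
# reads row i - 1, the space can be reduced to a single row of length
# required_sum + 1 by iterating j from high to low.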
| 523
|
'''simple docstring'''
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True
    # override this in concrete algorithm executors
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1
    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)
    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 523
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 703
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
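    # A hedged usage sketch of the command registered above (flag names as
    # defined here; the checkpoint and config paths are hypothetical):
    #
    #   transformers-cli convert --model_type bert \
    #       --tf_checkpoint ./bert_model.ckpt \
    #       --config ./bert_config.json \
    #       --pytorch_dump_output ./pytorch_model.bin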
    def __init__(self, model_type, tf_checkpoint, pytorch_dump_output, config, finetuning_task_name, *args):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(a__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a__ )
if "ckpt" in self._tf_checkpoint.lower():
__magic_name__ = self._tf_checkpoint
__magic_name__ = ''''''
else:
__magic_name__ = self._tf_checkpoint
__magic_name__ = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
a__ , self._config , self._pytorch_dump_output , a__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a__ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, "
                "transfo_xl, xlnet, xlm, lxmert, rembert]"
            )
| 245
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 340
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
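# Tiny illustration of rename_key (hypothetical one-entry state dict, not part
# of the conversion):
#
#   sd = {"input_proj.weight": torch.zeros(1)}
#   rename_key(sd, "input_proj.weight", "input_projection.weight")
#   assert list(sd) == ["input_projection.weight"]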
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
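# Shape sanity sketch for the splits above (illustrative only, not part of the
# conversion): DETR uses d_model = 256, so a fused in_proj matrix is (3*256, 256)
# and the three row-slices recover the (256, 256) q/k/v projections.
#
#   fused = torch.randn(768, 256)
#   q, k, v = fused[:256, :], fused[256:512, :], fused[-256:, :]
#   assert q.shape == k.shape == v.shape == (256, 256)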
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
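# Hedged usage sketch once a checkpoint has been converted and pushed (the repo
# id below follows the push_to_hub pattern above and is hypothetical):
#
#   from transformers import DetrForObjectDetection
#   model = DetrForObjectDetection.from_pretrained("nielsr/detr-resnet-50")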
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 297
| 0
|
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 714
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
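# Illustrative note on the lazy pattern above (not part of this module): the
# _LazyModule keeps importing this package cheap; a heavy backend is only
# imported when an attribute is first touched, e.g.
#
#   from transformers.models import distilbert   # no torch/tf/flax import yet
#   distilbert.DistilBertConfig                  # triggers the real submodule import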
| 217
| 0
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
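# Illustrative note (not part of this module): when a backend is missing, the
# guarded branches above fall back to dummy placeholder objects, so
#
#   from diffusers.schedulers import DDIMScheduler
#
# succeeds at import time and only raises a helpful "backend missing" error
# when the class is actually used.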
| 63
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
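# Worked example of the rules above (the key is hypothetical):
#   "encoder.layers.0.attention.q_lin.weight"
#   -> "encoder.layers.0.attn.q_proj.weight"       (PATTERNS substitutions)
#   -> "encoder.layers.0.self_attn.q_proj.weight"  (encoder-specific ".attn" fix-up)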
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63
| 1
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
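    # The two composition comments above have no accompanying code. A hedged
    # sketch follows, assuming scikit-fuzzy's cartprod/maxmin_composition/
    # maxprod_composition helpers, which operate on 2-D fuzzy relation matrices:
    #
    #   R = fuzz.cartprod(young, middle_aged)       # fuzzy relation on X x X
    #   S = fuzz.cartprod(middle_aged, young)
    #   T_maxmin = fuzz.maxmin_composition(R, S)    # max-min composition
    #   T_maxprod = fuzz.maxprod_composition(R, S)  # max-product composition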
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
    plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 239
|
"""simple docstring"""
class MaxFenwickTree:
    """
    Fenwick tree for range-maximum queries. Note: update() merges the new value
    into stored maxima, so it assumes values only grow (decreasing an entry is
    not supported).
    """

    def __init__(self, size):
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index):
        return index | (index + 1)

    @staticmethod
    def get_prev(index):
        return (index & (index + 1)) - 1

    def update(self, index, value):
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # tree[index] covers only this position
                self.tree[index] = value
            else:
                # merge the new value into the stored range maximum
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left, right):
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
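# Minimal usage sketch (names as defined above; query's right bound is exclusive):
#
#   tree = MaxFenwickTree(8)
#   tree.update(2, 5)
#   tree.update(4, 9)
#   assert tree.query(0, 8) == 9
#   assert tree.query(0, 4) == 5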
| 239
| 1
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    # Disable gradients for every parameter of the given module.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 29
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length=8):
    # Uses the cryptographically secure secrets module; length defaults to 8.
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


# ALTERNATIVE method:
# chars_incl = characters that must be in the password
# i = desired total password length
def alternative_password_generator(chars_incl, i):
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl, i):
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password, min_length=8):
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
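# Hedged, non-interactive usage sketch of the functions above:
#
#   print(password_generator(12))          # e.g. 'k3;Qv%9tL@x!'
#   assert is_strong_password("Xy7#abcd")  # upper, lower, digit, special char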
| 593
| 0
|
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        # "sql" and "con" belong to the reader path and must not reach pandas' to_sql
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch

        return written
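# Hedged usage sketch of the writer above (in-memory sqlite connection; the
# table name is hypothetical). datasets exposes this path via Dataset.to_sql:
#
#   import sqlite3
#   con = sqlite3.connect(":memory:")
#   Dataset.from_dict({"a": [1, 2, 3]}).to_sql("my_table", con)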
| 702
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
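# Hedged usage sketch once uploaded (the model id follows the header comment):
#
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")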
| 463
| 0
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        # a sentinel bucket entry: carries no key/value and is falsy
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class A_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :float = 0.7_5 ) -> None:
'''simple docstring'''
snake_case_ : Any = initial_block_size
snake_case_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case_ : Tuple = capacity_factor
snake_case_ : List[Any] = 0
def _A ( self :Tuple , lowerCAmelCase__ :KEY ) -> int:
'''simple docstring'''
return hash(lowerCAmelCase__ ) % len(self._buckets )
def _A ( self :Any , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> bool:
'''simple docstring'''
snake_case_ : Optional[int] = self._buckets[ind]
if not stored:
snake_case_ : int = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
self._len += 1
return True
elif stored.key == key:
snake_case_ : Optional[int] = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
return True
else:
return False
def _A ( self :int ) -> bool:
'''simple docstring'''
snake_case_ : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCAmelCase__ )
def _A ( self :Any ) -> bool:
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case_ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _A ( self :Tuple , lowerCAmelCase__ :int ) -> None:
'''simple docstring'''
snake_case_ : Tuple = self._buckets
snake_case_ : int = [None] * new_size
snake_case_ : Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _A ( self :Optional[int] ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def _A ( self :str ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def _A ( self :Optional[int] , lowerCAmelCase__ :KEY ) -> Iterator[int]:
'''simple docstring'''
snake_case_ : str = self._get_bucket_index(lowerCAmelCase__ )
for _ in range(len(self._buckets ) ):
yield ind
snake_case_ : List[Any] = self._get_next_ind(lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
if self._try_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
break
def __setitem__( self :Optional[int] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCAmelCase__ , lowerCAmelCase__ )
def __delitem__( self :List[Any] , lowerCAmelCase__ :KEY ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : int = self._buckets[ind]
if item is None:
raise KeyError(lowerCAmelCase__ )
if item is _deleted:
continue
if item.key == key:
snake_case_ : List[str] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self :List[str] , lowerCAmelCase__ :KEY ) -> VAL:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCAmelCase__ )
def __len__( self :Optional[Any] ) -> int:
'''simple docstring'''
return self._len
def __iter__( self :List[Any] ) -> Iterator[KEY]:
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
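if __name__ == "__main__":
    # Short usage sketch (added, not part of the original module): open
    # addressing with tombstones means deletion keeps later probe chains intact.
    hm = HashMap()
    hm["apple"] = 1
    hm["banana"] = 2
    hm["apple"] = 3  # overwrite in place; length stays 2
    assert hm["apple"] == 3 and len(hm) == 2
    del hm["banana"]  # replaced by a tombstone, not None
    assert "banana" not in hm and len(hm) == 1
    print(hm)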
| 653
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
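# Note (added): with the lazy structure above, `import transformers.models.vit`
# stays cheap. For example, `from transformers.models.vit import ViTModel` only
# triggers the modeling_vit (and hence torch) import on first attribute access,
# and only the names registered in `_import_structure` are resolvable this way.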
| 653
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
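# Hedged usage sketch (added, not in the original file):
#
# config = MobileNetV2Config(depth_multiplier=0.35, image_size=96)
# onnx_config = MobileNetV2OnnxConfig(config)
# assert "pixel_values" in onnx_config.inputs
# assert onnx_config.atol_for_validation == 1e-4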
| 702
|
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(123)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 430
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")

        # Copy and update: the dataclass is frozen, so write through __dict__.
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 34
|
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 substring-divisibility property of a digit tuple."""
    if num[3] % 2 != 0:  # d2d3d4 divisible by 2 <=> d4 is even
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:  # d3d4d5 divisible by 3
        return False

    if num[5] % 5 != 0:  # d4d5d6 divisible by 5 <=> d6 is 0 or 5
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'{solution() = }')
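if __name__ == "__main__":
    # Added sanity check (not in the original): 1406357289, the example from
    # the Project Euler 43 statement, has the property (357 % 7 == 0,
    # 572 % 11 == 0, 728 % 13 == 0, 289 % 17 == 0).
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))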
| 110
| 0
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the converted T5X params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
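# Example invocation (added; all paths below are placeholders):
#
# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path ./umt5-converted \
#     --scalable_attention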
| 214
|
def check_bouncy(n: int) -> bool:
    """Return True if `n` is bouncy: its digits are neither increasing nor decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 214
| 1
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCAmelCase_ = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1_000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
UpperCAmelCase_ = parse_args()
main(args)
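# Example invocation (added; flags mirror parse_args() above, bucket name is a
# placeholder):
#
# python prepare_tfrecord_shards.py \
#     --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#     --split train --shard_size 1000 --max_length 512 \
#     --output_dir gs://my-bucket/tf-tpu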
| 271
|
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first `n` lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
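# With fire.Fire, positional CLI args map onto minify()'s parameters, e.g.
# (paths and the script name are placeholders):
#
# python minify_dataset.py ./wmt_en_ro ./wmt_en_ro_tiny 128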
| 271
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """Configuration for 8-bit / 4-bit model loading via bitsandbytes."""

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Safety checker that arguments are correct."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self):
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
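# Hedged usage sketch (added): a 4-bit NF4 configuration with double
# quantization; the 4-bit path requires bitsandbytes>=0.39.0 at runtime.
#
# config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_use_double_quant=True,
#     bnb_4bit_compute_dtype="bfloat16",
# )
# assert config.quantization_method() == "nf4"
# print(config.to_json_string())  # serializes only non-default fields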
| 719
|
def lucas_lehmer_test(p: int) -> bool:
    """Primality test for the Mersenne number 2**p - 1 (valid when p is prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
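if __name__ == "__main__":
    # Added illustration (not in the original): the classic small Mersenne
    # prime exponents all pass, e.g. 2**13 - 1 = 8191 is prime, while
    # lucas_lehmer_test(11) above is False since 2047 = 23 * 89.
    assert all(lucas_lehmer_test(p) for p in (3, 5, 7, 13, 17, 19, 31))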
| 557
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowerCamelCase = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
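# Note (added): new code should use the replacement class directly; the shim
# above only exists so old imports keep working with a FutureWarning.
#
# from transformers import OwlViTImageProcessor
# processor = OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")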
| 82
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of PriorTransformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
"""simple docstring"""
super().__init__()
_lowerCAmelCase:str = num_attention_heads
_lowerCAmelCase:Any = attention_head_dim
_lowerCAmelCase:str = num_attention_heads * attention_head_dim
_lowerCAmelCase:Any = additional_embeddings
_lowerCAmelCase:Any = time_embed_dim or inner_dim
_lowerCAmelCase:Optional[int] = embedding_proj_dim or embedding_dim
_lowerCAmelCase:Any = clip_embed_dim or embedding_dim
_lowerCAmelCase:str = Timesteps(a__ ,a__ ,0)
_lowerCAmelCase:Tuple = TimestepEmbedding(a__ ,a__ ,out_dim=a__ ,act_fn=a__)
_lowerCAmelCase:str = nn.Linear(a__ ,a__)
if embedding_proj_norm_type is None:
_lowerCAmelCase:str = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase:Any = nn.LayerNorm(a__)
else:
raise ValueError(F'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}')
_lowerCAmelCase:Tuple = nn.Linear(a__ ,a__)
if encoder_hid_proj_type is None:
_lowerCAmelCase:Tuple = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase:int = nn.Linear(a__ ,a__)
else:
raise ValueError(F'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}')
_lowerCAmelCase:Union[str, Any] = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,a__))
if added_emb_type == "prd":
_lowerCAmelCase:str = nn.Parameter(torch.zeros(1 ,1 ,a__))
elif added_emb_type is None:
_lowerCAmelCase:Optional[Any] = None
else:
raise ValueError(
F'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.')
_lowerCAmelCase:Dict = nn.ModuleList(
[
BasicTransformerBlock(
a__ ,a__ ,a__ ,dropout=a__ ,activation_fn='''gelu''' ,attention_bias=a__ ,)
for d in range(a__)
])
if norm_in_type == "layer":
_lowerCAmelCase:Dict = nn.LayerNorm(a__)
elif norm_in_type is None:
_lowerCAmelCase:List[Any] = None
else:
raise ValueError(F'Unsupported norm_in_type: {norm_in_type}.')
_lowerCAmelCase:Dict = nn.LayerNorm(a__)
_lowerCAmelCase:Optional[int] = nn.Linear(a__ ,a__)
_lowerCAmelCase:Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-10000.0)
causal_attention_mask.triu_(1)
_lowerCAmelCase:int = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' ,a__ ,persistent=a__)
_lowerCAmelCase:Tuple = nn.Parameter(torch.zeros(1 ,a__))
_lowerCAmelCase:Tuple = nn.Parameter(torch.zeros(1 ,a__))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCamelCase ( self : List[str]) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = {}
def fn_recursive_add_processors(a__ : str ,a__ : torch.nn.Module ,a__ : Dict[str, AttentionProcessor]):
if hasattr(a__ ,'''set_processor'''):
_lowerCAmelCase:int = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' ,a__ ,a__)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(a__ ,a__ ,a__)
return processors
def __UpperCamelCase ( self : Any ,a__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase:Tuple = len(self.attn_processors.keys())
if isinstance(a__ ,a__) and len(a__) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(a__)} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.')
def fn_recursive_attn_processor(a__ : str ,a__ : torch.nn.Module ,a__ : Tuple):
if hasattr(a__ ,'''set_processor'''):
if not isinstance(a__ ,a__):
module.set_processor(a__)
else:
module.set_processor(processor.pop(F'{name}.processor'))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' ,a__ ,a__)
for name, module in self.named_children():
fn_recursive_attn_processor(a__ ,a__ ,a__)
def __UpperCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
self.set_attn_processor(AttnProcessor())
def __UpperCamelCase ( self : Optional[int] ,a__ : List[str] ,a__ : Union[torch.Tensor, float, int] ,a__ : torch.FloatTensor ,a__ : Optional[torch.FloatTensor] = None ,a__ : Optional[torch.BoolTensor] = None ,a__ : bool = True ,) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Dict = hidden_states.shape[0]
_lowerCAmelCase:Any = timestep
if not torch.is_tensor(a__):
_lowerCAmelCase:List[str] = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device)
elif torch.is_tensor(a__) and len(timesteps.shape) == 0:
_lowerCAmelCase:Tuple = timesteps[None].to(hidden_states.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase:str = timesteps * torch.ones(a__ ,dtype=timesteps.dtype ,device=timesteps.device)
_lowerCAmelCase:List[str] = self.time_proj(a__)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase:Optional[Any] = timesteps_projected.to(dtype=self.dtype)
_lowerCAmelCase:str = self.time_embedding(a__)
if self.embedding_proj_norm is not None:
_lowerCAmelCase:Tuple = self.embedding_proj_norm(a__)
_lowerCAmelCase:Optional[int] = self.embedding_proj(a__)
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase:List[str] = self.encoder_hidden_states_proj(a__)
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''')
_lowerCAmelCase:Dict = self.proj_in(a__)
_lowerCAmelCase:Union[str, Any] = self.positional_embedding.to(hidden_states.dtype)
_lowerCAmelCase:List[str] = []
_lowerCAmelCase:str = 0
if encoder_hidden_states is not None:
additional_embeds.append(a__)
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape) == 2:
_lowerCAmelCase:Optional[Any] = proj_embeddings[:, None, :]
if len(hidden_states.shape) == 2:
_lowerCAmelCase:Optional[Any] = hidden_states[:, None, :]
_lowerCAmelCase:Optional[int] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase:Union[str, Any] = self.prd_embedding.to(hidden_states.dtype).expand(a__ ,-1 ,-1)
additional_embeds.append(a__)
_lowerCAmelCase:Dict = torch.cat(
a__ ,dim=1 ,)
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase:Optional[int] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase:Optional[int] = F.pad(
a__ ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase:List[str] = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase:int = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
_lowerCAmelCase:str = F.pad(a__ ,(0, self.additional_embeddings) ,value=0.0)
_lowerCAmelCase:Optional[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
_lowerCAmelCase:str = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0)
if self.norm_in is not None:
_lowerCAmelCase:Union[str, Any] = self.norm_in(a__)
for block in self.transformer_blocks:
_lowerCAmelCase:Any = block(a__ ,attention_mask=a__)
_lowerCAmelCase:int = self.norm_out(a__)
if self.prd_embedding is not None:
_lowerCAmelCase:List[str] = hidden_states[:, -1]
else:
_lowerCAmelCase:Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase:int = self.proj_to_clip_embeddings(a__)
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=a__)
def __UpperCamelCase ( self : Tuple ,a__ : Optional[int]) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Dict = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 227
| 0
|
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 98
|
'''simple docstring'''
from manim import *
class Stage1(Scene):
    """Manim scene animating how an empty model skeleton is loaded into memory."""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCAmelCase__ = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCAmelCase__ = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCAmelCase__ = Text('''CPU''' , font_size=24 )
lowerCAmelCase__ = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
lowerCAmelCase__ = [mem.copy() for i in range(1 )]
lowerCAmelCase__ = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCAmelCase__ = Text('''GPU''' , font_size=24 )
lowerCAmelCase__ = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.align_to(lowerCamelCase_ , lowerCamelCase_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase_ )
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCAmelCase__ = Text('''Model''' , font_size=24 )
lowerCAmelCase__ = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , )
lowerCAmelCase__ = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
lowerCAmelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=2.5 ) , Write(lowerCamelCase_ ) , Write(lowerCamelCase_ ) )
self.add(lowerCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i, rect in enumerate(lowerCamelCase_ ):
lowerCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.7 )
cpu_target.move_to(lowerCamelCase_ )
cpu_target.generate_target()
lowerCAmelCase__ = 0.46 / 4
lowerCAmelCase__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase_ , buff=0.0 )
cpu_targs.append(lowerCamelCase_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase_ ) )
second_animations.append(MoveToTarget(lowerCamelCase_ , run_time=1.5 ) )
self.play(*lowerCamelCase_ )
self.play(*lowerCamelCase_ )
self.wait()
| 98
| 1
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 85
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 208
| 0
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to torch.float16 in place or at save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
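# Usage sketch (file names here are hypothetical, for illustration only): `fire` exposes
# `convert`'s signature on the command line, so an fp16 copy of a checkpoint can be made with
#
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# Omitting --save_path overwrites the source checkpoint in place.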
| 702
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
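# Usage sketch (the checkpoint id and the dummy waveform are illustrative assumptions,
# not part of this file):
#
#   import numpy as np
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(
#       text=["a dog barking"], audios=[np.zeros(48_000)], sampling_rate=48_000, return_tensors="pt"
#   )  # -> contains `input_ids`, `attention_mask` and `input_features`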
| 455
| 0
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Function to reshape a row Numpy array into a column Numpy array"""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix inside each class"""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix between multiple classes"""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto the `dimensions` eigenvectors with the largest eigenvalues."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Linear Discriminant Analysis: project the dataset so that class separation is maximized."""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
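# Quick sanity check (illustrative sketch using the functions above): project the
# 3-feature toy dataset from test_linear_discriminant_analysis onto 2 principal axes.
#
#   features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
#   principal_component_analysis(features, dimensions=2)  # returns a (2, 5) projection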
| 63
|
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation, e.g. 5 -> "0b101"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
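# Worked examples (values follow directly from the algorithm; the function name comes
# from the restoration above):
#   decimal_to_binary(0)  -> "0b0"
#   decimal_to_binary(5)  -> "0b101"   (5 = 4 + 1)
#   decimal_to_binary(-5) -> "-0b101"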
| 218
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 694
|
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the distributions this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions of the distributions this object constructs."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value that lies in the support of the constructed distributions; used e.g. for padding."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return a module mapping network features to the distribution parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth map onto the positive orthant, an alternative to softplus."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
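# Usage sketch (shapes are illustrative): a distribution head turns network features into
# distribution parameters, then into a torch Distribution that can be scored or sampled.
#
#   output = StudentTOutput(dim=1)
#   proj = output.get_parameter_projection(in_features=32)
#   distr_args = proj(torch.randn(8, 32))     # (df, loc, scale), each of shape (8,)
#   distr = output.distribution(distr_args)
#   nll = -distr.log_prob(torch.randn(8))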
| 694
| 1
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
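# Usage sketch (the script file name and directory paths are illustrative assumptions):
#
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./gptsan_tf --output ./gptsan_pytorch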
| 22
|
def _lowerCamelCase ( __A : str ) -> list:
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__A ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('doctest').testmod()
| 485
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 717
|
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
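# Worked check: arc_length(90, 10) = 2π · 10 · (90 / 360) = 5π ≈ 15.7079.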
| 537
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 74
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 571
| 0
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def snake_case ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] ,reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] ,)
def snake_case ( self ,snake_case__ ):
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__=0.9 ,snake_case__=3 ,snake_case__=0.5 ):
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE_ : List[Any] = [
meteor_score.single_meteor_score(
word_tokenize(lowerCamelCase_ ) ,word_tokenize(lowerCamelCase_ ) ,alpha=lowerCamelCase_ ,beta=lowerCamelCase_ ,gamma=lowerCamelCase_ )
for ref, pred in zip(lowerCamelCase_ ,lowerCamelCase_ )
]
else:
SCREAMING_SNAKE_CASE_ : Dict = [
meteor_score.single_meteor_score(lowerCamelCase_ ,lowerCamelCase_ ,alpha=lowerCamelCase_ ,beta=lowerCamelCase_ ,gamma=lowerCamelCase_ )
for ref, pred in zip(lowerCamelCase_ ,lowerCamelCase_ )
]
return {"meteor": np.mean(lowerCamelCase_ )}
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch a training function from a notebook, on one or several processes."""
    # Are we in a Google Colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function on `num_processes` CPU processes, for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
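

# A minimal usage sketch (`training_function` and its arguments are
# placeholders, not part of this module): build the `Accelerator` *inside*
# the training function, then hand it to the launcher from a notebook cell:
#
#     def training_function(model, train_dataloader):
#         ...  # create an Accelerator here and run the training loop
#
#     notebook_launcher(training_function, args=(model, train_dataloader), num_processes=2)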
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
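

# A minimal usage sketch, assuming a compatible pretrained checkpoint id
# (the checkpoint name below is illustrative, not referenced by this file):
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
#     waveform = output.audios[0]  # numpy array of shape (channels, samples)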
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
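

# A minimal shape sketch (all sizes below are illustrative assumptions):
#
#     proj = UnCLIPTextProjModel(
#         clip_extra_context_tokens=4, clip_embeddings_dim=768,
#         time_embed_dim=512, cross_attention_dim=768,
#     )
#     hidden_states, time_embeddings = proj(
#         image_embeddings=torch.randn(2, 768),
#         prompt_embeds=torch.randn(2, 768),
#         text_encoder_hidden_states=torch.randn(2, 77, 768),
#         do_classifier_free_guidance=False,
#     )
#     # hidden_states: (2, 4 + 77, 768); time_embeddings: (2, 512)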
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Finds the thirteen adjacent digits in the 1000-digit number `n`
    that have the greatest product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
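
# Quick sanity check of the sliding-window logic on a toy input
# (a hypothetical 15-digit string, so there are three 13-digit windows):
#
#     solution("1" * 13 + "29")  # best window ends in "...29": 2 * 9 = 18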
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True

        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models

    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
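
# A hypothetical invocation sketch (script name and paths are placeholders):
#
#     python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#         --fsmt_checkpoint_path ./wmt19.ru-en.ensemble/model4.pt \
#         --pytorch_dump_folder_path ./fsmt-wmt19-ru-en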
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
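
# Minimal usage sketch for the pipeline above. "harmonai/maestro-150k" is assumed to be
# a compatible public Dance Diffusion checkpoint; any unconditional audio UNet/scheduler
# pair registered with this pipeline works the same way:
#
#   import scipy.io.wavfile
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#   scipy.io.wavfile.write("sample.wav", pipe.unet.config.sample_rate, output.audios[0].T)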
| 711
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
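
# The slow test above exercises the two-stage Kandinsky 2.2 flow end to end: the prior
# pipeline maps the prompt (plus init image) to image embeddings, and the controlnet
# img2img pipeline consumes those embeddings together with the depth hint. Condensed
# sketch of the handoff, with descriptive names standing in for the renamed locals:
#
#   image_emb, zero_image_emb = pipe_prior(
#       prompt, image=init_image, strength=0.85, generator=generator, negative_prompt=""
#   ).to_tuple()
#   image = pipeline(
#       image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb,
#       hint=hint, generator=generator, num_inference_steps=100,
#       height=512, width=512, strength=0.5, output_type="np",
#   ).images[0]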
| 696
| 0
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase_ :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=10 , _UpperCAmelCase=[8, 16, 32, 64] , _UpperCAmelCase=[1, 1, 2, 1] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=["stage2", "stage3", "stage4"] , _UpperCAmelCase=[2, 3, 4] , _UpperCAmelCase=1 , ):
"""simple docstring"""
a_ = parent
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = embeddings_size
a_ = hidden_sizes
a_ = depths
a_ = is_training
a_ = use_labels
a_ = hidden_act
a_ = num_labels
a_ = scope
a_ = len(_UpperCAmelCase )
a_ = out_features
a_ = out_indices
a_ = num_groups
def lowercase__ ( self ):
"""simple docstring"""
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.num_labels )
a_ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
a_ = BitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
a_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
a_ = self.num_labels
a_ = BitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
a_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
a_ = BitBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
a_ = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
a_ = None
a_ = BitBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
a_ = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ ( UpperCamelCase__ ,UpperCamelCase__ ,unittest.TestCase):
"""simple docstring"""
snake_case_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
snake_case_ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase__ ( self ):
"""simple docstring"""
a_ = BitModelTester(self )
a_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason="""Bit does not output attentions""" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(_UpperCAmelCase )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(config=_UpperCAmelCase )
for name, module in model.named_modules():
if isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
a_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
a_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a_ = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
a_ = layer_type
a_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = BitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def lowerCamelCase_ ( ):
"""simple docstring"""
a_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
a_ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_UpperCAmelCase )
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
a_ = model(**_UpperCAmelCase )
# verify the logits
a_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
a_ = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
@require_torch
class lowercase_ ( UpperCamelCase__ ,unittest.TestCase):
"""simple docstring"""
snake_case_ = (BitBackbone,) if is_torch_available() else ()
snake_case_ = BitConfig
snake_case_ = False
def lowercase__ ( self ):
"""simple docstring"""
a_ = BitModelTester(self )
| 483
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[str] =logging.get_logger(__name__)
A_ : Union[str, Any] ={}
class lowercase_ ( UpperCamelCase__):
"""simple docstring"""
snake_case_ = '''llama'''
snake_case_ = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=32_000 , _UpperCAmelCase=4_096 , _UpperCAmelCase=11_008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2_048 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
"""simple docstring"""
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = intermediate_size
a_ = num_hidden_layers
a_ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
a_ = num_attention_heads
a_ = num_key_value_heads
a_ = hidden_act
a_ = initializer_range
a_ = rms_norm_eps
a_ = pretraining_tp
a_ = use_cache
a_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def lowercase__ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"got {self.rope_scaling}" )
a_ = self.rope_scaling.get("""type""" , _UpperCAmelCase )
a_ = self.rope_scaling.get("""factor""" , _UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 483
| 1
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Parameters: fill in the dataset locations before running.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and YOLO-format annotations from paired .txt/.jpg files."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    """Flip every image and mirror the normalized box centers accordingly."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip: mirror x_center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip: mirror y_center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 716
|
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def _SCREAMING_SNAKE_CASE ( __lowercase : str ) -> str:
"""simple docstring"""
if "cls_token" in name:
__A = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
__A = name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
__A = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__A = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__A = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__A = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
__A = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__A = name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
__A = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__A = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__A = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__A = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__A = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__A = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__A = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__A = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__A = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
__A = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
__A = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def _SCREAMING_SNAKE_CASE ( __lowercase : Any , __lowercase : Dict ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__A = orig_state_dict.pop(__lowercase )
if "qkv" in key:
__A = key.split(""".""" )
__A = int(key_split[1] )
if "decoder_blocks" in key:
__A = config.decoder_hidden_size
__A = """decoder.decoder_layers."""
if "weight" in key:
__A = val[:dim, :]
__A = val[dim : dim * 2, :]
__A = val[-dim:, :]
elif "bias" in key:
__A = val[:dim]
__A = val[dim : dim * 2]
__A = val[-dim:]
else:
__A = config.hidden_size
__A = """vit.encoder.layer."""
if "weight" in key:
__A = val[:dim, :]
__A = val[dim : dim * 2, :]
__A = val[-dim:, :]
elif "bias" in key:
__A = val[:dim]
__A = val[dim : dim * 2]
__A = val[-dim:]
else:
__A = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( __lowercase : Tuple , __lowercase : str ) -> Optional[Any]:
"""simple docstring"""
__A = ViTMAEConfig()
if "large" in checkpoint_url:
__A = 1_0_2_4
__A = 4_0_9_6
__A = 2_4
__A = 1_6
elif "huge" in checkpoint_url:
__A = 1_4
__A = 1_2_8_0
__A = 5_1_2_0
__A = 3_2
__A = 1_6
__A = ViTMAEForPreTraining(__lowercase )
__A = torch.hub.load_state_dict_from_url(__lowercase , map_location="""cpu""" )["""model"""]
__A = ViTMAEImageProcessor(size=config.image_size )
__A = convert_state_dict(__lowercase , __lowercase )
model.load_state_dict(__lowercase )
model.eval()
__A = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
__A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
__A = ViTMAEImageProcessor(size=config.image_size )
__A = image_processor(images=__lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
__A = model(**__lowercase )
__A = outputs.logits
if "large" in checkpoint_url:
__A = torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
__A = torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
__A = torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1E-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__lowercase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
__a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__a : List[str] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
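
# Usage sketch (script name assumed; the large-checkpoint URL follows the same
# dl.fbaipublicfiles.com pattern as the default base checkpoint above):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_large.pth \
#       --pytorch_dump_folder_path ./vit-mae-large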
| 199
| 0
|
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__snake_case = logging.get_logger(__name__)
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = to_pil_image(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pil_image.size
SCREAMING_SNAKE_CASE__ = pytesseract.image_to_data(UpperCamelCase_ , lang=UpperCamelCase_ , output_type='dict' , config=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ = [idx for idx, word in enumerate(UpperCamelCase_ ) if not word.strip()]
SCREAMING_SNAKE_CASE__ = [word for idx, word in enumerate(UpperCamelCase_ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(UpperCamelCase_ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(UpperCamelCase_ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(UpperCamelCase_ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(UpperCamelCase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ = []
for x, y, w, h in zip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = [x, y, x + w, y + h]
actual_boxes.append(UpperCamelCase_ )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowercase__ ( _UpperCAmelCase ):
A__ : List[str] =["""pixel_values"""]
def __init__( self : Any , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : float = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[float, Iterable[float]] = None , UpperCAmelCase_ : Union[float, Iterable[float]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[str] = "" , **UpperCAmelCase_ : List[Any] , ):
super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = size if size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_value
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
SCREAMING_SNAKE_CASE__ = apply_ocr
SCREAMING_SNAKE_CASE__ = ocr_lang
SCREAMING_SNAKE_CASE__ = tesseract_config
def A_ ( self : Dict , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : str , ):
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
SCREAMING_SNAKE_CASE__ = (size['height'], size['width'])
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : str , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : Dict , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, Iterable[float]] , UpperCAmelCase_ : Union[float, Iterable[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : int , ):
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Union[float, Iterable[float]] = None , UpperCAmelCase_ : Union[float, Iterable[float]] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Dict , ):
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(UpperCAmelCase_ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , 'pytesseract' )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for image in images:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = apply_tesseract(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
words_batch.append(UpperCAmelCase_ )
boxes_batch.append(UpperCAmelCase_ )
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images]
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
SCREAMING_SNAKE_CASE__ = BatchFeature(data={'pixel_values': images} , tensor_type=UpperCAmelCase_ )
if apply_ocr:
SCREAMING_SNAKE_CASE__ = words_batch
SCREAMING_SNAKE_CASE__ = boxes_batch
return data
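
# Minimal usage sketch for the processor above (it matches LayoutLMv2ImageProcessor in
# transformers; the class name is inferred, since this row renames it). With the default
# apply_ocr=True, Tesseract must be installed:
#
#   from PIL import Image
#
#   processor = LayoutLMv2ImageProcessor()
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   # encoding["pixel_values"], encoding["words"], encoding["boxes"]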
| 472
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number - 1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
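
# Usage sketch for `deprecate` above: warn about a legacy kwarg, pop it out of the
# caller's kwargs, and return its value. The removal version is illustrative and must
# lie in the future relative to the installed diffusers version, or the helper raises:
#
#   def resize(image, new_size=None, **kwargs):
#       size = deprecate("size", "1.0.0", "Use `new_size` instead.", take_from=kwargs)
#       if size is not None:
#           new_size = size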
| 472
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any]=7 , lowerCamelCase_ : str=3 , lowerCamelCase_ : str=18 , lowerCamelCase_ : Any=30 , lowerCamelCase_ : int=400 , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Any=None , lowerCamelCase_ : int=True , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Tuple=[0.5, 0.5, 0.5] , lowerCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , ):
_lowerCAmelCase =size if size is not None else {"""shortest_edge""": 18}
_lowerCAmelCase =crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =image_size
_lowerCAmelCase =min_resolution
_lowerCAmelCase =max_resolution
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =do_center_crop
_lowerCAmelCase =crop_size
_lowerCAmelCase =do_normalize
_lowerCAmelCase =image_mean
_lowerCAmelCase =image_std
def lowerCAmelCase__ ( self : str ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCamelCase ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_: Union[str, Any] = LevitImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self : int ):
_lowerCAmelCase =LevitImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Union[str, Any] ):
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCAmelCase__ ( self : Dict ):
_lowerCAmelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase__ ( self : Union[str, Any] ):
pass
def lowerCAmelCase__ ( self : int ):
# Initialize image_processing
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase =image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase__ ( self : str ):
# Initialize image_processing
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase =image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase__ ( self : Optional[int] ):
# Initialize image_processing
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase =image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 149
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__SCREAMING_SNAKE_CASE : List[str] = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
__SCREAMING_SNAKE_CASE : Dict = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def snake_case_ ( lowercase__ : Any , lowercase__ : Union[str, Any]=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase =create_model(
"""HTSAT-tiny""" , """roberta""" , lowercase__ , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=lowercase__ , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def snake_case_ ( lowercase__ : str ):
'''simple docstring'''
_lowerCAmelCase ={}
_lowerCAmelCase =r""".*sequential.(\d+).*"""
_lowerCAmelCase =r""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_lowerCAmelCase =key.replace(lowercase__ , lowercase__ )
if re.match(lowercase__ , lowercase__ ):
# replace sequential layers with list
_lowerCAmelCase =re.match(lowercase__ , lowercase__ ).group(1 )
_lowerCAmelCase =key.replace(f"sequential.{sequential_layer}." , f"layers.{int(lowercase__ )//3}.linear." )
elif re.match(lowercase__ , lowercase__ ):
_lowerCAmelCase =int(re.match(lowercase__ , lowercase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_lowerCAmelCase =1 if projecton_layer == 0 else 2
_lowerCAmelCase =key.replace(f"_projection.{projecton_layer}." , f"_projection.linear{transformers_projection_layer}." )
if "audio" and "qkv" in key:
# split qkv into query key and value
_lowerCAmelCase =value
_lowerCAmelCase =mixed_qkv.size(0 ) // 3
_lowerCAmelCase =mixed_qkv[:qkv_dim]
_lowerCAmelCase =mixed_qkv[qkv_dim : qkv_dim * 2]
_lowerCAmelCase =mixed_qkv[qkv_dim * 2 :]
_lowerCAmelCase =query_layer
_lowerCAmelCase =key_layer
_lowerCAmelCase =value_layer
else:
_lowerCAmelCase =value
return model_state_dict
def snake_case_ ( lowercase__ : int , lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : Optional[Any]=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase =init_clap(lowercase__ , enable_fusion=lowercase__ )
clap_model.eval()
_lowerCAmelCase =clap_model.state_dict()
_lowerCAmelCase =rename_state_dict(lowercase__ )
_lowerCAmelCase =ClapConfig()
_lowerCAmelCase =enable_fusion
_lowerCAmelCase =ClapModel(lowercase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowercase__ , strict=lowercase__ )
model.save_pretrained(lowercase__ )
transformers_config.save_pretrained(lowercase__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
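
# Usage sketch (script name and local paths are hypothetical; pass --enable_fusion only
# when the original CLAP checkpoint was trained with feature fusion):
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./clap_htsat_tiny.pt \
#       --pytorch_dump_folder_path ./clap-converted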
| 149
| 1
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase ( a , a=False ):
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE_ :Tuple = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
SCREAMING_SNAKE_CASE_ :Any = default
else:
# KEY is set, convert it to True or False.
try:
SCREAMING_SNAKE_CASE_ :Optional[int] = strtobool(a )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
SCREAMING_SNAKE_CASE__ = parse_flag_from_env("RUN_SLOW", default=False)
def lowercase ( a ):
'''simple docstring'''
return unittest.skip("Test was skipped" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , "test is slow" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(a )
def lowercase ( a=None , a=None ):
'''simple docstring'''
if test_case is None:
return partial(a , version=a )
return unittest.skipUnless(is_torch_version(">=" , a ) , F"test requires torch version >= {version}" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(a )
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(a )
SCREAMING_SNAKE_CASE__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase ( a ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(a )
class _UpperCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : str = True
@classmethod
def _snake_case ( cls : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :List[str] = tempfile.mkdtemp()
@classmethod
def _snake_case ( cls : Union[str, Any]):
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def _snake_case ( self : int):
if self.clear_on_setup:
for path in Path(self.tmpdir).glob("**/*"):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCAmelCase)
class _UpperCAmelCase ( unittest.TestCase ):
def _snake_case ( self : Optional[Any]):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _UpperCAmelCase ( unittest.TestCase ):
def _snake_case ( self : Any , UpperCAmelCase : Union[mock.Mock, List[mock.Mock]]):
SCREAMING_SNAKE_CASE_ :List[str] = mocks if isinstance(UpperCAmelCase , (tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[Any] = AcceleratorState()
SCREAMING_SNAKE_CASE_ :Any = tensor[None].clone().to(state.device )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = gather(a ).cpu()
SCREAMING_SNAKE_CASE_ :Tuple = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , a ):
return False
return True
class _UpperCAmelCase :
def __init__( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :Any = returncode
SCREAMING_SNAKE_CASE_ :List[str] = stdout
SCREAMING_SNAKE_CASE_ :Dict = stderr
async def lowercase ( a , a ):
'''simple docstring'''
while True:
SCREAMING_SNAKE_CASE_ :Optional[int] = await stream.readline()
if line:
callback(a )
else:
break
async def lowercase ( a , a=None , a=None , a=None , a=False , a=False ):
'''simple docstring'''
if echo:
print("\nRunning: " , " ".join(a ) )
SCREAMING_SNAKE_CASE_ :str = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=a , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=a , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that no data
    # will be seen until the process is done, and if it hangs there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
SCREAMING_SNAKE_CASE_ :Optional[Any] = []
SCREAMING_SNAKE_CASE_ :Dict = []
def tee(a , a , a , a="" ):
SCREAMING_SNAKE_CASE_ :Optional[Any] = line.decode("utf-8" ).rstrip()
sink.append(a )
if not quiet:
print(a , a , file=a )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda a : tee(a , a , sys.stdout , label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda a : tee(a , a , sys.stderr , label="stderr:" ) ) ),
] , timeout=a , )
return _RunOutput(await p.wait() , a , a )
def lowercase ( a , a=None , a=None , a=180 , a=False , a=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Union[str, Any] = asyncio.get_event_loop()
SCREAMING_SNAKE_CASE_ :Dict = loop.run_until_complete(
_stream_subprocess(a , env=a , stdin=a , timeout=a , quiet=a , echo=a ) )
SCREAMING_SNAKE_CASE_ :Optional[int] = " ".join(a )
if result.returncode > 0:
SCREAMING_SNAKE_CASE_ :Optional[int] = "\n".join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class _UpperCAmelCase ( lowercase ):
pass
def lowercase ( a , a=False ):
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = subprocess.check_output(a , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(a , "decode" ):
SCREAMING_SNAKE_CASE_ :List[str] = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(a )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 631
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1_280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width is lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main():
    '''simple docstring'''
    img_paths, annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image, new_annos, path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit("." , 1 )[0]
        file_root = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(F"{file_root}.jpg" , new_image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = F"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj )
        with open(F"{file_root}.txt" , "w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
def get_dataset(label_dir , img_dir ):
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , "*.txt" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F"{label_name}.jpg" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n" ).split(" " )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ):
    '''simple docstring'''
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cv2.imread(path )
        if i == 0:  # top-left
            img = cv2.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cv2.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cv2.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cv2.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char ):
    '''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 631
| 1
|
"""simple docstring"""
def multiplication_table( number , number_of_terms ) -> str:
    """simple docstring"""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1 , number_of_terms + 1 ) )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
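# (added note) Example output: multiplication_table(number=5, number_of_terms=3)
# returns "5 * 1 = 5\n5 * 2 = 10\n5 * 3 = 15".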
| 487
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class a__ :
lowercase_ = None
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
__UpperCAmelCase : Any = json.loads(feat_extract.to_json_string())
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase_)
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "feat_extract.json")
feat_extract_first.to_json_file(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = self.feature_extraction_class.from_json_file(UpperCamelCase_)
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict())
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Tuple = feat_extract_first.save_pretrained(UpperCamelCase_)[0]
check_json_file_has_correct_format(UpperCamelCase_)
__UpperCAmelCase : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase_)
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict())
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase_)
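

# --- Hedged sketch (added; not part of the original file) ---
# The mixin class above (`a__`) expects subclasses to provide
# `feature_extraction_class` and `feat_extract_dict`. A concrete test would
# look roughly like this; the extractor name and kwargs are illustrative.
#
# import unittest
#
# class MyFeatureExtractionTest(a__, unittest.TestCase):
#     feature_extraction_class = MyFeatureExtractor
#     feat_extract_dict = {"sampling_rate": 16_000, "feature_size": 80}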
| 487
| 1
|
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class lowercase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _A , _A , _A , _A , _A=1 , _A=False , **_A ):
'''simple docstring'''
super().__init__(**_A )
UpperCamelCase : List[str] = vocab_size
UpperCamelCase : Optional[Any] = d_embed
UpperCamelCase : List[Any] = d_proj
UpperCamelCase : List[Any] = cutoffs + [vocab_size]
UpperCamelCase : Any = [0] + self.cutoffs
UpperCamelCase : Union[str, Any] = div_val
UpperCamelCase : Dict = self.cutoffs[0]
UpperCamelCase : Any = len(self.cutoffs ) - 1
UpperCamelCase : Optional[Any] = self.shortlist_size + self.n_clusters
UpperCamelCase : int = keep_order
UpperCamelCase : Any = []
UpperCamelCase : Optional[int] = []
def _a ( self , _A ):
'''simple docstring'''
if self.n_clusters > 0:
UpperCamelCase : int = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=_A , name="""cluster_weight""" )
UpperCamelCase : Optional[int] = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=_A , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
UpperCamelCase : Optional[int] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=_A , name=f"""out_projs_._{i}""" , )
self.out_projs.append(_A )
else:
self.out_projs.append(_A )
UpperCamelCase : List[str] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=_A , name=f"""out_layers_._{i}_._weight""" , )
UpperCamelCase : int = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=_A , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
UpperCamelCase , UpperCamelCase : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase : int = self.d_embed // (self.div_val**i)
UpperCamelCase : Any = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=_A , name=f"""out_projs_._{i}""" )
self.out_projs.append(_A )
UpperCamelCase : int = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=_A , name=f"""out_layers_._{i}_._weight""" , )
UpperCamelCase : Optional[Any] = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=_A , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
super().build(_A )
@staticmethod
def _a ( _A , _A , _A , _A=None ):
'''simple docstring'''
UpperCamelCase : List[str] = x
if proj is not None:
UpperCamelCase : List[str] = tf.einsum("""ibd,ed->ibe""" , _A , _A )
return tf.einsum("""ibd,nd->ibn""" , _A , _A ) + b
@staticmethod
def _a ( _A , _A ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = shape_list(_A )
UpperCamelCase : Any = tf.range(lp_size[0] , dtype=target.dtype )
UpperCamelCase : str = tf.stack([r, target] , 1 )
return tf.gather_nd(_A , _A )
def _a ( self , _A , _A , _A=True , _A=False ):
'''simple docstring'''
UpperCamelCase : int = 0
if self.n_clusters == 0:
UpperCamelCase : Dict = self._logit(_A , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
UpperCamelCase : Optional[Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_A , logits=_A )
UpperCamelCase : List[str] = tf.nn.log_softmax(_A , axis=-1 )
else:
UpperCamelCase : Any = shape_list(_A )
UpperCamelCase : Any = []
UpperCamelCase : Optional[int] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
UpperCamelCase , UpperCamelCase : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
UpperCamelCase : List[str] = (target >= l_idx) & (target < r_idx)
UpperCamelCase : List[str] = tf.where(_A )
UpperCamelCase : Union[str, Any] = tf.boolean_mask(_A , _A ) - l_idx
if self.div_val == 1:
UpperCamelCase : str = self.out_layers[0][0][l_idx:r_idx]
UpperCamelCase : Union[str, Any] = self.out_layers[0][1][l_idx:r_idx]
else:
UpperCamelCase : Tuple = self.out_layers[i][0]
UpperCamelCase : Optional[Any] = self.out_layers[i][1]
if i == 0:
UpperCamelCase : str = tf.concat([cur_W, self.cluster_weight] , 0 )
UpperCamelCase : List[Any] = tf.concat([cur_b, self.cluster_bias] , 0 )
UpperCamelCase : Optional[int] = self._logit(_A , _A , _A , self.out_projs[0] )
UpperCamelCase : Optional[int] = tf.nn.log_softmax(_A )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
UpperCamelCase : Dict = tf.boolean_mask(_A , _A )
UpperCamelCase : Optional[Any] = self._gather_logprob(_A , _A )
else:
UpperCamelCase : Tuple = self._logit(_A , _A , _A , self.out_projs[i] )
UpperCamelCase : str = tf.nn.log_softmax(_A )
UpperCamelCase : List[str] = self.cutoffs[0] + i - 1 # No probability for the head cluster
UpperCamelCase : Tuple = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_A )
if target is not None:
UpperCamelCase : List[Any] = tf.boolean_mask(_A , _A )
UpperCamelCase : int = tf.boolean_mask(_A , _A )
UpperCamelCase : Optional[Any] = self._gather_logprob(_A , _A )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_A , -cur_logprob , shape_list(_A ) )
UpperCamelCase : int = tf.concat(_A , axis=-1 )
if target is not None:
if return_mean:
UpperCamelCase : Union[str, Any] = tf.reduce_mean(_A )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_A )
# Log the loss as a metric (we could log arbitrary metrics,
        # including different metrics for training and inference).
self.add_metric(_A , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
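

# --- Hedged illustration (added; not part of the original layer) ---
# The _logit helper above computes logits with an einsum: an input of shape
# (seq, batch, d_embed) against a weight of shape (vocab, d_embed) yields
# logits of shape (seq, batch, vocab). Underscore names are illustrative.
_x = tf.random.normal((3, 2, 8))   # (i=seq, b=batch, d=hidden)
_w = tf.random.normal((10, 8))     # (n=vocab, d=hidden)
_b = tf.zeros((10,))
_logits = tf.einsum("ibd,nd->ibn", _x, _w) + _b
assert _logits.shape == (3, 2, 10)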
| 102
|
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
__lowerCAmelCase : List[str] = ["""audio_values""", """audio_mask"""]
def __init__( self , snake_case_=2_0_4_8 , snake_case_=1 , snake_case_=[1_6, 1_6] , snake_case_=1_2_8 , snake_case_=4_4_1_0_0 , snake_case_=8_6 , snake_case_=2_0_4_8 , snake_case_=0.0 , **snake_case_ , ):
super().__init__(
feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = spectrogram_length
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : Tuple = feature_size // self.patch_size[1]
_lowerCAmelCase : Optional[int] = n_fft
_lowerCAmelCase : Union[str, Any] = sampling_rate // hop_length_to_sampling_rate
_lowerCAmelCase : Optional[Any] = sampling_rate
_lowerCAmelCase : Any = padding_value
_lowerCAmelCase : Optional[int] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=snake_case_ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=snake_case_ , norm="""slaney""" , mel_scale="""slaney""" , ).T
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : int = spectrogram(
snake_case_ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
_lowerCAmelCase : int = log_spec[:, :-1]
_lowerCAmelCase : List[Any] = log_spec - 20.0
_lowerCAmelCase : Any = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = True , snake_case_ = None , snake_case_ = False , snake_case_ = False , **snake_case_ , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
_lowerCAmelCase : List[Any] = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCAmelCase : List[str] = is_batched_numpy or (
isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCAmelCase : Dict = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(snake_case_ , np.ndarray ):
_lowerCAmelCase : Optional[Any] = np.asarray(snake_case_ , dtype=np.floataa )
elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCAmelCase : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCAmelCase : Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_lowerCAmelCase : Optional[Any] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , snake_case_ ):
_lowerCAmelCase : Optional[Any] = [np.asarray(snake_case_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_lowerCAmelCase : Any = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_lowerCAmelCase : Union[str, Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_lowerCAmelCase : Optional[int] = np.array(snake_case_ ).astype(np.floataa )
# convert into correct format for padding
_lowerCAmelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_lowerCAmelCase : Union[str, Any] = np.ones([len(snake_case_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_lowerCAmelCase : int = padded_audio_features * self.padding_value
for i in range(len(snake_case_ ) ):
_lowerCAmelCase : Union[str, Any] = audio_features[i]
_lowerCAmelCase : List[str] = feature
# return as BatchFeature
if return_attention_mask:
_lowerCAmelCase : str = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
_lowerCAmelCase : List[Any] = {"""audio_values""": padded_audio_features}
_lowerCAmelCase : Dict = BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
return encoded_inputs
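

# --- Hedged illustration (added; not part of the original class) ---
# The normalization in _np_extract_fbank_features maps the dB-scaled
# spectrogram into [-1, 1]: after subtracting 20 dB and dividing by 40,
# 20 dB maps to 1.0 and anything at or below -60 dB clips to -1.0.
_db = np.array([20.0, -20.0, -60.0, -80.0])
_normalized = np.clip((_db - 20.0) / 40.0, -2.0, 0.0) + 1.0
assert np.allclose(_normalized, [1.0, 0.0, -1.0, -1.0])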
| 384
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
if "cls_token" in name:
A_ = name.replace("""cls_token""", """vit.embeddings.cls_token""" )
if "mask_token" in name:
A_ = name.replace("""mask_token""", """decoder.mask_token""" )
if "decoder_pos_embed" in name:
A_ = name.replace("""decoder_pos_embed""", """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
A_ = name.replace("""pos_embed""", """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
A_ = name.replace("""patch_embed.proj""", """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
A_ = name.replace("""patch_embed.norm""", """vit.embeddings.norm""" )
if "decoder_blocks" in name:
A_ = name.replace("""decoder_blocks""", """decoder.decoder_layers""" )
if "blocks" in name:
A_ = name.replace("""blocks""", """vit.encoder.layer""" )
if "attn.proj" in name:
A_ = name.replace("""attn.proj""", """attention.output.dense""" )
if "attn" in name:
A_ = name.replace("""attn""", """attention.self""" )
if "norm1" in name:
A_ = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name:
A_ = name.replace("""norm2""", """layernorm_after""" )
if "mlp.fc1" in name:
A_ = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
A_ = name.replace("""mlp.fc2""", """output.dense""" )
if "decoder_embed" in name:
A_ = name.replace("""decoder_embed""", """decoder.decoder_embed""" )
if "decoder_norm" in name:
A_ = name.replace("""decoder_norm""", """decoder.decoder_norm""" )
if "decoder_pred" in name:
A_ = name.replace("""decoder_pred""", """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
A_ = name.replace("""norm.weight""", """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
A_ = name.replace("""norm.bias""", """vit.layernorm.bias""" )
return name
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
A_ = orig_state_dict.pop(UpperCAmelCase__ )
if "qkv" in key:
A_ = key.split(""".""" )
A_ = int(key_split[1] )
if "decoder_blocks" in key:
A_ = config.decoder_hidden_size
A_ = """decoder.decoder_layers."""
if "weight" in key:
A_ = val[:dim, :]
A_ = val[dim : dim * 2, :]
A_ = val[-dim:, :]
elif "bias" in key:
A_ = val[:dim]
A_ = val[dim : dim * 2]
A_ = val[-dim:]
else:
A_ = config.hidden_size
A_ = """vit.encoder.layer."""
if "weight" in key:
A_ = val[:dim, :]
A_ = val[dim : dim * 2, :]
A_ = val[-dim:, :]
elif "bias" in key:
A_ = val[:dim]
A_ = val[dim : dim * 2]
A_ = val[-dim:]
else:
A_ = val
return orig_state_dict
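

# --- Hedged illustration (added; not part of the original script) ---
# convert_state_dict above splits a fused qkv matrix row-wise into equal
# thirds; concatenating the slices back recovers the original tensor.
_dim = 4
_qkv = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : _dim * 2, :], _qkv[-_dim:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _qkv)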
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]:
A_ = ViTMAEConfig()
if "large" in checkpoint_url:
A_ = 10_24
A_ = 40_96
A_ = 24
A_ = 16
elif "huge" in checkpoint_url:
A_ = 14
A_ = 12_80
A_ = 51_20
A_ = 32
A_ = 16
A_ = ViTMAEForPreTraining(UpperCAmelCase__ )
A_ = torch.hub.load_state_dict_from_url(UpperCAmelCase__, map_location="""cpu""" )["""model"""]
A_ = ViTMAEImageProcessor(size=config.image_size )
A_ = convert_state_dict(UpperCAmelCase__, UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
model.eval()
A_ = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
A_ = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw )
A_ = ViTMAEImageProcessor(size=config.image_size )
A_ = image_processor(images=UpperCAmelCase__, return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
A_ = model(**UpperCAmelCase__ )
A_ = outputs.logits
if "large" in checkpoint_url:
A_ = torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
A_ = torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
A_ = torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3], UpperCAmelCase__, atol=1e-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCamelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 667
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace("expert/" , F"experts/expert_{idx}/" )] = expert_weihts[idx]
                print(F"{key} -> {key.replace('expert/', F'experts/expert_{idx}/')}" )
            s_dict.pop(key )
return s_dict
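

# --- Hedged illustration (added; not part of the original script) ---
# The first renaming step above rewrites T5X layer indices into the HF
# block/layer scheme:
_key = "encoder/layers_3/attention/query/kernel"
assert re.sub(r"layers_(\d+)", r"block/\1/layer", _key) == "encoder/block/3/layer/attention/query/kernel"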
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
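

# --- Hedged illustration (added; not part of the original script) ---
# convert_gin_to_config scrapes `NAME = value` pairs out of the gin file with
# a simple regex before mapping them through GIN_TO_CONFIG_MAPPING:
_gin_text = "NUM_HEADS = 12\nEMBED_DIM = 768\n"
assert re.findall(r"(.*) = ([0-9.]*)", _gin_text) == [("NUM_HEADS", "12"), ("EMBED_DIM", "768")]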
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 667
| 1
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs , ks ) -> bool:
    """simple docstring"""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$" ) for x in qs )
    # Try to match the window of strings in ks starting from each position
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False


def _replacement_rules(rules ):
    """simple docstring"""
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """simple docstring"""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp" , None )),
        (("transformer", "wte", "embedding"), P("mp" , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , "mp" )),
        (("attention", "out_proj", "kernel"), P("mp" , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , "mp" )),
        (("mlp", "c_fc", "bias"), P("mp" )),
        (("mlp", "c_proj", "kernel"), P("mp" , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict ):
    """simple docstring"""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
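

# --- Hedged illustration (added; not part of the original module) ---
# _match slides the tuple of query regexes over the key tuple and requires a
# full match of every element in some contiguous window:
assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _match(("attention", "out_proj", "kernel"), ("mlp", "c_proj", "bias"))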
| 676
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowercase__ : Optional[int] = None
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : List[str] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowercase__ : Dict = {
'''camembert-base''': 5_12,
}
lowercase__ : str = '''▁'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = CamembertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ):
'''simple docstring'''
__A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__A : List[str] = vocab_file
__A : Optional[int] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Optional[Any] = [self.cls_token_id]
__A : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : Optional[int] = [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase):
copyfile(self.vocab_file , _UpperCAmelCase)
return (out_vocab_file,)
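

# --- Hedged illustration (added; not part of the original tokenizer) ---
# The special-token layout built above doubles the separator between a
# sequence pair: single is `<s> A </s>`, pair is `<s> A </s></s> B </s>`.
# With hypothetical ids cls=5 and sep=6:
_cls, _sep = [5], [6]
_a, _b = [10, 11], [12]
assert _cls + _a + _sep + _sep + _b + _sep == [5, 10, 11, 6, 6, 12, 6]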
| 8
| 0
|
import qiskit
def quantum_entanglement ( qubits: int = 2 ):
    '''simple docstring'''
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , qubits ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(classical_bits ) ) )
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
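
# (added note) For the entangled state prepared above, an ideal simulator
# concentrates all 1_000 shots on the all-zeros and all-ones bitstrings,
# e.g. roughly {'000': ~500, '111': ~500} for three qubits.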
| 452
|
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="attention" ):
'''simple docstring'''
__UpperCamelCase :int = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
__UpperCamelCase :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
__UpperCamelCase :List[str] = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
__UpperCamelCase :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
if split_mlp_wi:
__UpperCamelCase :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
__UpperCamelCase :Dict = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
__UpperCamelCase :Optional[Any] = (wi_a, wi_a)
else:
__UpperCamelCase :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]
__UpperCamelCase :int = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]
def lowerCamelCase ( SCREAMING_SNAKE_CASE , *, SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = traverse_util.flatten_dict(variables['''target'''] )
__UpperCamelCase :Optional[int] = {'''/'''.join(SCREAMING_SNAKE_CASE ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__UpperCamelCase :Union[str, Any] = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[Any] = collections.OrderedDict()
# Shared embeddings.
__UpperCamelCase :Any = old['''token_embedder/embedding''']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
__UpperCamelCase :Tuple = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , '''pre_attention_layer_norm''' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , '''attention''' )
__UpperCamelCase :str = layer_norm
__UpperCamelCase :str = k.T
__UpperCamelCase :Optional[Any] = o.T
__UpperCamelCase :Optional[int] = q.T
__UpperCamelCase :Any = v.T
# Block i, layer 1 (MLP).
__UpperCamelCase :Union[str, Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , '''pre_mlp_layer_norm''' )
__UpperCamelCase , __UpperCamelCase :Optional[int] = tax_mlp_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = layer_norm
if split_mlp_wi:
__UpperCamelCase :Optional[Any] = wi[0].T
__UpperCamelCase :int = wi[1].T
else:
__UpperCamelCase :Dict = wi.T
__UpperCamelCase :str = wo.T
__UpperCamelCase :Optional[Any] = old[
'''encoder/relpos_bias/rel_embedding'''
].T
__UpperCamelCase :Optional[int] = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
__UpperCamelCase :int = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_self_attention_layer_norm''' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''self_attention''' )
__UpperCamelCase :str = layer_norm
__UpperCamelCase :Optional[Any] = k.T
__UpperCamelCase :List[str] = o.T
__UpperCamelCase :Union[str, Any] = q.T
__UpperCamelCase :int = v.T
# Block i, layer 1 (Cross Attention).
__UpperCamelCase :Optional[Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_cross_attention_layer_norm''' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''encoder_decoder_attention''' )
__UpperCamelCase :List[str] = layer_norm
__UpperCamelCase :Any = k.T
__UpperCamelCase :Any = o.T
__UpperCamelCase :Optional[int] = q.T
__UpperCamelCase :Union[str, Any] = v.T
# Block i, layer 2 (MLP).
__UpperCamelCase :Tuple = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_mlp_layer_norm''' )
__UpperCamelCase , __UpperCamelCase :Optional[Any] = tax_mlp_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[Any] = layer_norm
if split_mlp_wi:
__UpperCamelCase :int = wi[0].T
__UpperCamelCase :Any = wi[1].T
else:
__UpperCamelCase :Optional[Any] = wi.T
__UpperCamelCase :Dict = wo.T
__UpperCamelCase :Optional[int] = old['''decoder/decoder_norm/scale''']
__UpperCamelCase :Optional[Any] = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__UpperCamelCase :List[str] = old['''decoder/logits_dense/kernel'''].T
return new
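

# (added note) The pervasive `.T` above reflects a kernel-layout mismatch:
# T5X/Flax stores dense kernels as (in_features, out_features), while PyTorch
# nn.Linear weights are (out_features, in_features), so every kernel is
# transposed during conversion.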
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__UpperCamelCase :Union[str, Any] = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__UpperCamelCase :Any = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
__UpperCamelCase :Optional[int] = state_dict['''shared.weight''']
return state_dict
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :int = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE )
__UpperCamelCase :str = convert_tax_to_pytorch(SCREAMING_SNAKE_CASE , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[int] = make_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = TaConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__UpperCamelCase :Dict = TaEncoderModel(SCREAMING_SNAKE_CASE )
else:
__UpperCamelCase :str = TaForConditionalGeneration(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tax_weights_in_ta(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Verify that we can load the checkpoint.
model.from_pretrained(SCREAMING_SNAKE_CASE )
print('''Done''' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
__lowercase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 452
| 1
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class __a :
def __init__( self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : int=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Dict=19 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : str=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : int=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : List[str]=512 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Any=None , )-> List[Any]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Any:
"""simple docstring"""
UpperCamelCase = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=SCREAMING_SNAKE_CASE__ , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , )
return config
def _SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = EsmForProteinFolding(config=SCREAMING_SNAKE_CASE__ ).float()
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
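        # (added note, best-effort reading of the shapes) positions come out
        # per structure-module iteration: 8 iterations x (batch, seq_length)
        # residues x 14 atoms (atom14 layout) x 3 coordinates; angles are 7
        # torsion angles per residue, each as a (sin, cos) pair.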
def _SCREAMING_SNAKE_CASE ( self : str )-> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : List[str] = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCamelCase_ : int = ()
UpperCamelCase_ : int = {} if is_torch_available() else {}
UpperCamelCase_ : Dict = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
UpperCamelCase = EsmFoldModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> int:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
@unittest.skip("Does not support attention outputs" )
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> str:
"""simple docstring"""
pass
@unittest.skip
def _SCREAMING_SNAKE_CASE ( self : str )-> Any:
"""simple docstring"""
pass
@unittest.skip("Esm does not support embedding resizing" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
pass
@unittest.skip("Esm does not support embedding resizing" )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def _SCREAMING_SNAKE_CASE ( self : int )-> List[str]:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> List[Any]:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> str:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> Any:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Dict:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def _SCREAMING_SNAKE_CASE ( self : Any )-> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def _SCREAMING_SNAKE_CASE ( self : Any )-> Dict:
"""simple docstring"""
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def _SCREAMING_SNAKE_CASE ( self : Any )-> Any:
"""simple docstring"""
pass
@unittest.skip("ESMFold only has one output format." )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support input chunking." )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> str:
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Any:
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def _SCREAMING_SNAKE_CASE ( self : int )-> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def _SCREAMING_SNAKE_CASE ( self : int )-> List[str]:
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def _SCREAMING_SNAKE_CASE ( self : Dict )-> List[Any]:
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> List[str]:
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> int:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 554
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 427
| 0
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
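# A hedged sketch of the resulting string (version numbers are illustrative):
#   http_user_agent({"pipeline": "text-to-image"})
#   -> "diffusers/<version>; python/3.10.0; session_id/<hex>; torch/2.0.0; pipeline/text-to-image"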
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
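# Hedged example (hypothetical cache path): a resolved file such as
#   "~/.cache/huggingface/hub/models--x/snapshots/0123abcd.../model.bin"
# yields "0123abcd..." when it matches REGEX_COMMIT_HASH, and None otherwise.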
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
__A : Dict = os.path.expanduser(
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
__A : Optional[int] = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
__A : Optional[Any] = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
__A : Optional[Any] = 0
else:
with open(cache_version_file) as f:
try:
__A : Tuple = int(f.read())
except ValueError:
__A : int = 0
if cache_version < 1:
__A : List[Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
__A : Optional[int] = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"the directory exists and can be written to."
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
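# Hedged example: _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin" -- the variant slots in before the extension.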
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir, force_download=force_download, proxies=proxies,
                    resume_download=resume_download, local_files_only=local_files_only,
                    use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name,
                cache_dir=cache_dir, force_download=force_download, proxies=proxies,
                resume_download=resume_download, local_files_only=local_files_only,
                use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
'''this model name. Check the model page at '''
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
| 718
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[Any] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
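# Hedged usage sketch (assumes the reconstructed signature above): stage names are
# derived from `depths`, so out_features/out_indices are validated against them.
#   config = BitConfig(depths=[3, 4, 6, 3])
#   assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]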
| 95
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128,
        has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64,
        max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True,
        input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size,
            hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
    def atol_for_validation(self) -> float:
return 1E-5
@property
    def default_onnx_opset(self) -> int:
return 1_2
    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework)
        )
        return inputs
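# Hedged usage sketch (assumes the class names reconstructed above): the input
# order flips for QA/sequence-classification tasks, e.g.
#   onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
#   assert list(onnx_config.inputs) == ["input_ids", "attention_mask", "bbox", "pixel_values"]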
| 356
|
'''simple docstring'''
def topological_sort(graph):
    """Kahn's algorithm: repeatedly remove vertices with indegree 0."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
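# Hedged extra example (not in the original file): a cyclic graph makes Kahn's
# algorithm stop before visiting every vertex, so a cycle is reported instead.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints: Cycle exists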
| 356
| 1
|
def catalan_numbers(upper_limit: int) -> list:
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
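# Hedged sanity check (assumes the function above): the first six Catalan numbers
# are C(0)..C(5) = 1, 1, 2, 5, 14, 42, so
#   catalan_numbers(5) == [1, 1, 2, 5, 14, 42]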
| 641
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 641
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64,
        num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3,
        num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01,
        router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32,
        relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0,
        feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False,
        use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )
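# Hedged sketch of the sparse-layer bookkeeping above: with the defaults
# num_layers=12 and num_sparse_encoder_layers=3, every 4th layer is a
# mixture-of-experts layer.
#   config = SwitchTransformersConfig()
#   assert config.encoder_sparse_step == 4 and config.decoder_sparse_step == 4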
| 328
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02,
        layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0,
        activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 201
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 710
|
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
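# Hedged sanity check (assumes the helpers above): F(12) = 144 is the first
# Fibonacci number with three digits, so solution(3) == 12.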
| 377
| 0
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a translation dataset with the datasets package and save each split as .source/.target files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")
    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
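# Hedged CLI sketch: fire exposes the function's keyword arguments as flags, e.g.
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en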
| 104
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    download_desc: Optional[str] = None
    storage_options: Optional[Dict] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
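# Hedged usage sketch (field names above are reconstructed from the datasets library):
#   cfg = DownloadConfig(force_download=True, max_retries=3)
#   fresh = cfg.copy()  # deep-copies every field into a new instance
#   assert fresh.force_download and fresh is not cfg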
| 647
| 0
|
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializers and save an optimized copy next to the input file."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
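# Hedged usage sketch (hypothetical path): deduplicate shared initializers in an
# exported model and write "optimized_<name>" next to the original file.
if __name__ == "__main__":
    optimized_path = remove_dup_initializers("exported/model.onnx")
    print("wrote", optimized_path)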
| 715
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
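# Hedged usage sketch: with no backbone_config given, a default ResNet backbone
# config is created, so e.g.
#   config = UperNetConfig()
#   assert config.backbone_config.model_type == "resnet"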
| 369
| 0
|